mirror of
https://github.com/meilisearch/meilisearch.git
synced 2025-12-10 06:35:43 +00:00
Compare commits
1 Commits
chat-route
...
better-gre
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
72c506d8f6 |
39
.github/workflows/bench-manual.yml
vendored
39
.github/workflows/bench-manual.yml
vendored
@@ -1,27 +1,28 @@
|
|||||||
name: Bench (manual)
|
name: Bench (manual)
|
||||||
|
|
||||||
on:
|
on:
|
||||||
workflow_dispatch:
|
workflow_dispatch:
|
||||||
inputs:
|
inputs:
|
||||||
workload:
|
workload:
|
||||||
description: "The path to the workloads to execute (workloads/...)"
|
description: 'The path to the workloads to execute (workloads/...)'
|
||||||
required: true
|
required: true
|
||||||
default: "workloads/movies.json"
|
default: 'workloads/movies.json'
|
||||||
|
|
||||||
env:
|
env:
|
||||||
WORKLOAD_NAME: ${{ github.event.inputs.workload }}
|
WORKLOAD_NAME: ${{ github.event.inputs.workload }}
|
||||||
|
|
||||||
jobs:
|
jobs:
|
||||||
benchmarks:
|
benchmarks:
|
||||||
name: Run and upload benchmarks
|
name: Run and upload benchmarks
|
||||||
runs-on: benchmarks
|
runs-on: benchmarks
|
||||||
timeout-minutes: 180 # 3h
|
timeout-minutes: 180 # 3h
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v3
|
- uses: actions/checkout@v3
|
||||||
- uses: dtolnay/rust-toolchain@1.85
|
- uses: dtolnay/rust-toolchain@1.81
|
||||||
with:
|
with:
|
||||||
profile: minimal
|
profile: minimal
|
||||||
|
|
||||||
|
- name: Run benchmarks - workload ${WORKLOAD_NAME} - branch ${{ github.ref }} - commit ${{ github.sha }}
|
||||||
|
run: |
|
||||||
|
cargo xtask bench --api-key "${{ secrets.BENCHMARK_API_KEY }}" --dashboard-url "${{ vars.BENCHMARK_DASHBOARD_URL }}" --reason "Manual [Run #${{ github.run_id }}](https://github.com/meilisearch/meilisearch/actions/runs/${{ github.run_id }})" -- ${WORKLOAD_NAME}
|
||||||
|
|
||||||
- name: Run benchmarks - workload ${WORKLOAD_NAME} - branch ${{ github.ref }} - commit ${{ github.sha }}
|
|
||||||
run: |
|
|
||||||
cargo xtask bench --api-key "${{ secrets.BENCHMARK_API_KEY }}" --dashboard-url "${{ vars.BENCHMARK_DASHBOARD_URL }}" --reason "Manual [Run #${{ github.run_id }}](https://github.com/meilisearch/meilisearch/actions/runs/${{ github.run_id }})" -- ${WORKLOAD_NAME}
|
|
||||||
|
|||||||
136
.github/workflows/bench-pr.yml
vendored
136
.github/workflows/bench-pr.yml
vendored
@@ -1,82 +1,82 @@
|
|||||||
name: Bench (PR)
|
name: Bench (PR)
|
||||||
on:
|
on:
|
||||||
issue_comment:
|
issue_comment:
|
||||||
types: [created]
|
types: [created]
|
||||||
|
|
||||||
permissions:
|
permissions:
|
||||||
issues: write
|
issues: write
|
||||||
|
|
||||||
env:
|
env:
|
||||||
GH_TOKEN: ${{ secrets.MEILI_BOT_GH_PAT }}
|
GH_TOKEN: ${{ secrets.MEILI_BOT_GH_PAT }}
|
||||||
|
|
||||||
jobs:
|
jobs:
|
||||||
run-benchmarks-on-comment:
|
run-benchmarks-on-comment:
|
||||||
if: startsWith(github.event.comment.body, '/bench')
|
if: startsWith(github.event.comment.body, '/bench')
|
||||||
name: Run and upload benchmarks
|
name: Run and upload benchmarks
|
||||||
runs-on: benchmarks
|
runs-on: benchmarks
|
||||||
timeout-minutes: 180 # 3h
|
timeout-minutes: 180 # 3h
|
||||||
steps:
|
steps:
|
||||||
- name: Check permissions
|
- name: Check permissions
|
||||||
id: permission
|
id: permission
|
||||||
env:
|
env:
|
||||||
PR_AUTHOR: ${{github.event.issue.user.login }}
|
PR_AUTHOR: ${{github.event.issue.user.login }}
|
||||||
COMMENT_AUTHOR: ${{github.event.comment.user.login }}
|
COMMENT_AUTHOR: ${{github.event.comment.user.login }}
|
||||||
REPOSITORY: ${{github.repository}}
|
REPOSITORY: ${{github.repository}}
|
||||||
PR_ID: ${{github.event.issue.number}}
|
PR_ID: ${{github.event.issue.number}}
|
||||||
run: |
|
run: |
|
||||||
PR_REPOSITORY=$(gh api /repos/"$REPOSITORY"/pulls/"$PR_ID" --jq .head.repo.full_name)
|
PR_REPOSITORY=$(gh api /repos/"$REPOSITORY"/pulls/"$PR_ID" --jq .head.repo.full_name)
|
||||||
if $(gh api /repos/"$REPOSITORY"/collaborators/"$PR_AUTHOR"/permission --jq .user.permissions.push)
|
if $(gh api /repos/"$REPOSITORY"/collaborators/"$PR_AUTHOR"/permission --jq .user.permissions.push)
|
||||||
then
|
then
|
||||||
echo "::notice title=Authentication success::PR author authenticated"
|
echo "::notice title=Authentication success::PR author authenticated"
|
||||||
else
|
else
|
||||||
echo "::error title=Authentication error::PR author doesn't have push permission on this repository"
|
echo "::error title=Authentication error::PR author doesn't have push permission on this repository"
|
||||||
exit 1
|
exit 1
|
||||||
fi
|
fi
|
||||||
if $(gh api /repos/"$REPOSITORY"/collaborators/"$COMMENT_AUTHOR"/permission --jq .user.permissions.push)
|
if $(gh api /repos/"$REPOSITORY"/collaborators/"$COMMENT_AUTHOR"/permission --jq .user.permissions.push)
|
||||||
then
|
then
|
||||||
echo "::notice title=Authentication success::Comment author authenticated"
|
echo "::notice title=Authentication success::Comment author authenticated"
|
||||||
else
|
else
|
||||||
echo "::error title=Authentication error::Comment author doesn't have push permission on this repository"
|
echo "::error title=Authentication error::Comment author doesn't have push permission on this repository"
|
||||||
exit 1
|
exit 1
|
||||||
fi
|
fi
|
||||||
if [ "$PR_REPOSITORY" = "$REPOSITORY" ]
|
if [ "$PR_REPOSITORY" = "$REPOSITORY" ]
|
||||||
then
|
then
|
||||||
echo "::notice title=Authentication success::PR started from main repository"
|
echo "::notice title=Authentication success::PR started from main repository"
|
||||||
else
|
else
|
||||||
echo "::error title=Authentication error::PR started from a fork"
|
echo "::error title=Authentication error::PR started from a fork"
|
||||||
exit 1
|
exit 1
|
||||||
fi
|
fi
|
||||||
|
|
||||||
- name: Check for Command
|
- name: Check for Command
|
||||||
id: command
|
id: command
|
||||||
uses: xt0rted/slash-command-action@v2
|
uses: xt0rted/slash-command-action@v2
|
||||||
with:
|
with:
|
||||||
command: bench
|
command: bench
|
||||||
reaction-type: "rocket"
|
reaction-type: "rocket"
|
||||||
repo-token: ${{ env.GH_TOKEN }}
|
repo-token: ${{ env.GH_TOKEN }}
|
||||||
|
|
||||||
- uses: xt0rted/pull-request-comment-branch@v3
|
- uses: xt0rted/pull-request-comment-branch@v3
|
||||||
id: comment-branch
|
id: comment-branch
|
||||||
with:
|
with:
|
||||||
repo_token: ${{ env.GH_TOKEN }}
|
repo_token: ${{ env.GH_TOKEN }}
|
||||||
|
|
||||||
- uses: actions/checkout@v3
|
- uses: actions/checkout@v3
|
||||||
if: success()
|
if: success()
|
||||||
with:
|
with:
|
||||||
fetch-depth: 0 # fetch full history to be able to get main commit sha
|
fetch-depth: 0 # fetch full history to be able to get main commit sha
|
||||||
ref: ${{ steps.comment-branch.outputs.head_ref }}
|
ref: ${{ steps.comment-branch.outputs.head_ref }}
|
||||||
|
|
||||||
- uses: dtolnay/rust-toolchain@1.85
|
- uses: dtolnay/rust-toolchain@1.81
|
||||||
with:
|
with:
|
||||||
profile: minimal
|
profile: minimal
|
||||||
|
|
||||||
- name: Run benchmarks on PR ${{ github.event.issue.id }}
|
- name: Run benchmarks on PR ${{ github.event.issue.id }}
|
||||||
run: |
|
run: |
|
||||||
cargo xtask bench --api-key "${{ secrets.BENCHMARK_API_KEY }}" \
|
cargo xtask bench --api-key "${{ secrets.BENCHMARK_API_KEY }}" \
|
||||||
--dashboard-url "${{ vars.BENCHMARK_DASHBOARD_URL }}" \
|
--dashboard-url "${{ vars.BENCHMARK_DASHBOARD_URL }}" \
|
||||||
--reason "[Comment](${{ github.event.comment.html_url }}) on [#${{ github.event.issue.number }}](${{ github.event.issue.html_url }})" \
|
--reason "[Comment](${{ github.event.comment.html_url }}) on [#${{ github.event.issue.number }}](${{ github.event.issue.html_url }})" \
|
||||||
-- ${{ steps.command.outputs.command-arguments }} > benchlinks.txt
|
-- ${{ steps.command.outputs.command-arguments }} > benchlinks.txt
|
||||||
|
|
||||||
- name: Send comment in PR
|
- name: Send comment in PR
|
||||||
run: |
|
run: |
|
||||||
gh pr comment ${{github.event.issue.number}} --body-file benchlinks.txt
|
gh pr comment ${{github.event.issue.number}} --body-file benchlinks.txt
|
||||||
|
|||||||
33
.github/workflows/bench-push-indexing.yml
vendored
33
.github/workflows/bench-push-indexing.yml
vendored
@@ -1,22 +1,23 @@
|
|||||||
name: Indexing bench (push)
|
name: Indexing bench (push)
|
||||||
|
|
||||||
on:
|
on:
|
||||||
push:
|
push:
|
||||||
branches:
|
branches:
|
||||||
- main
|
- main
|
||||||
|
|
||||||
jobs:
|
jobs:
|
||||||
benchmarks:
|
benchmarks:
|
||||||
name: Run and upload benchmarks
|
name: Run and upload benchmarks
|
||||||
runs-on: benchmarks
|
runs-on: benchmarks
|
||||||
timeout-minutes: 180 # 3h
|
timeout-minutes: 180 # 3h
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v3
|
- uses: actions/checkout@v3
|
||||||
- uses: dtolnay/rust-toolchain@1.85
|
- uses: dtolnay/rust-toolchain@1.81
|
||||||
with:
|
with:
|
||||||
profile: minimal
|
profile: minimal
|
||||||
|
|
||||||
|
# Run benchmarks
|
||||||
|
- name: Run benchmarks - Dataset ${BENCH_NAME} - Branch main - Commit ${{ github.sha }}
|
||||||
|
run: |
|
||||||
|
cargo xtask bench --api-key "${{ secrets.BENCHMARK_API_KEY }}" --dashboard-url "${{ vars.BENCHMARK_DASHBOARD_URL }}" --reason "Push on `main` [Run #${{ github.run_id }}](https://github.com/meilisearch/meilisearch/actions/runs/${{ github.run_id }})" -- workloads/*.json
|
||||||
|
|
||||||
# Run benchmarks
|
|
||||||
- name: Run benchmarks - Dataset ${BENCH_NAME} - Branch main - Commit ${{ github.sha }}
|
|
||||||
run: |
|
|
||||||
cargo xtask bench --api-key "${{ secrets.BENCHMARK_API_KEY }}" --dashboard-url "${{ vars.BENCHMARK_DASHBOARD_URL }}" --reason "Push on `main` [Run #${{ github.run_id }}](https://github.com/meilisearch/meilisearch/actions/runs/${{ github.run_id }})" -- workloads/*.json
|
|
||||||
|
|||||||
8
.github/workflows/benchmarks-manual.yml
vendored
8
.github/workflows/benchmarks-manual.yml
vendored
@@ -4,9 +4,9 @@ on:
|
|||||||
workflow_dispatch:
|
workflow_dispatch:
|
||||||
inputs:
|
inputs:
|
||||||
dataset_name:
|
dataset_name:
|
||||||
description: "The name of the dataset used to benchmark (search_songs, search_wiki, search_geo or indexing)"
|
description: 'The name of the dataset used to benchmark (search_songs, search_wiki, search_geo or indexing)'
|
||||||
required: false
|
required: false
|
||||||
default: "search_songs"
|
default: 'search_songs'
|
||||||
|
|
||||||
env:
|
env:
|
||||||
BENCH_NAME: ${{ github.event.inputs.dataset_name }}
|
BENCH_NAME: ${{ github.event.inputs.dataset_name }}
|
||||||
@@ -18,7 +18,7 @@ jobs:
|
|||||||
timeout-minutes: 4320 # 72h
|
timeout-minutes: 4320 # 72h
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v3
|
- uses: actions/checkout@v3
|
||||||
- uses: dtolnay/rust-toolchain@1.85
|
- uses: dtolnay/rust-toolchain@1.81
|
||||||
with:
|
with:
|
||||||
profile: minimal
|
profile: minimal
|
||||||
|
|
||||||
@@ -67,7 +67,7 @@ jobs:
|
|||||||
out_dir: critcmp_results
|
out_dir: critcmp_results
|
||||||
|
|
||||||
# Helper
|
# Helper
|
||||||
- name: "README: compare with another benchmark"
|
- name: 'README: compare with another benchmark'
|
||||||
run: |
|
run: |
|
||||||
echo "${{ steps.file.outputs.basename }}.json has just been pushed."
|
echo "${{ steps.file.outputs.basename }}.json has just been pushed."
|
||||||
echo 'How to compare this benchmark with another one?'
|
echo 'How to compare this benchmark with another one?'
|
||||||
|
|||||||
2
.github/workflows/benchmarks-pr.yml
vendored
2
.github/workflows/benchmarks-pr.yml
vendored
@@ -44,7 +44,7 @@ jobs:
|
|||||||
exit 1
|
exit 1
|
||||||
fi
|
fi
|
||||||
|
|
||||||
- uses: dtolnay/rust-toolchain@1.85
|
- uses: dtolnay/rust-toolchain@1.81
|
||||||
with:
|
with:
|
||||||
profile: minimal
|
profile: minimal
|
||||||
|
|
||||||
|
|||||||
@@ -16,7 +16,7 @@ jobs:
|
|||||||
timeout-minutes: 4320 # 72h
|
timeout-minutes: 4320 # 72h
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v3
|
- uses: actions/checkout@v3
|
||||||
- uses: dtolnay/rust-toolchain@1.85
|
- uses: dtolnay/rust-toolchain@1.81
|
||||||
with:
|
with:
|
||||||
profile: minimal
|
profile: minimal
|
||||||
|
|
||||||
@@ -69,7 +69,7 @@ jobs:
|
|||||||
run: telegraf --config https://eu-central-1-1.aws.cloud2.influxdata.com/api/v2/telegrafs/08b52e34a370b000 --once --debug
|
run: telegraf --config https://eu-central-1-1.aws.cloud2.influxdata.com/api/v2/telegrafs/08b52e34a370b000 --once --debug
|
||||||
|
|
||||||
# Helper
|
# Helper
|
||||||
- name: "README: compare with another benchmark"
|
- name: 'README: compare with another benchmark'
|
||||||
run: |
|
run: |
|
||||||
echo "${{ steps.file.outputs.basename }}.json has just been pushed."
|
echo "${{ steps.file.outputs.basename }}.json has just been pushed."
|
||||||
echo 'How to compare this benchmark with another one?'
|
echo 'How to compare this benchmark with another one?'
|
||||||
|
|||||||
@@ -15,7 +15,7 @@ jobs:
|
|||||||
runs-on: benchmarks
|
runs-on: benchmarks
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v3
|
- uses: actions/checkout@v3
|
||||||
- uses: dtolnay/rust-toolchain@1.85
|
- uses: dtolnay/rust-toolchain@1.81
|
||||||
with:
|
with:
|
||||||
profile: minimal
|
profile: minimal
|
||||||
|
|
||||||
@@ -68,7 +68,7 @@ jobs:
|
|||||||
run: telegraf --config https://eu-central-1-1.aws.cloud2.influxdata.com/api/v2/telegrafs/08b52e34a370b000 --once --debug
|
run: telegraf --config https://eu-central-1-1.aws.cloud2.influxdata.com/api/v2/telegrafs/08b52e34a370b000 --once --debug
|
||||||
|
|
||||||
# Helper
|
# Helper
|
||||||
- name: "README: compare with another benchmark"
|
- name: 'README: compare with another benchmark'
|
||||||
run: |
|
run: |
|
||||||
echo "${{ steps.file.outputs.basename }}.json has just been pushed."
|
echo "${{ steps.file.outputs.basename }}.json has just been pushed."
|
||||||
echo 'How to compare this benchmark with another one?'
|
echo 'How to compare this benchmark with another one?'
|
||||||
|
|||||||
@@ -15,7 +15,7 @@ jobs:
|
|||||||
runs-on: benchmarks
|
runs-on: benchmarks
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v3
|
- uses: actions/checkout@v3
|
||||||
- uses: dtolnay/rust-toolchain@1.85
|
- uses: dtolnay/rust-toolchain@1.81
|
||||||
with:
|
with:
|
||||||
profile: minimal
|
profile: minimal
|
||||||
|
|
||||||
@@ -68,7 +68,7 @@ jobs:
|
|||||||
run: telegraf --config https://eu-central-1-1.aws.cloud2.influxdata.com/api/v2/telegrafs/08b52e34a370b000 --once --debug
|
run: telegraf --config https://eu-central-1-1.aws.cloud2.influxdata.com/api/v2/telegrafs/08b52e34a370b000 --once --debug
|
||||||
|
|
||||||
# Helper
|
# Helper
|
||||||
- name: "README: compare with another benchmark"
|
- name: 'README: compare with another benchmark'
|
||||||
run: |
|
run: |
|
||||||
echo "${{ steps.file.outputs.basename }}.json has just been pushed."
|
echo "${{ steps.file.outputs.basename }}.json has just been pushed."
|
||||||
echo 'How to compare this benchmark with another one?'
|
echo 'How to compare this benchmark with another one?'
|
||||||
|
|||||||
@@ -15,7 +15,7 @@ jobs:
|
|||||||
runs-on: benchmarks
|
runs-on: benchmarks
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v3
|
- uses: actions/checkout@v3
|
||||||
- uses: dtolnay/rust-toolchain@1.85
|
- uses: dtolnay/rust-toolchain@1.81
|
||||||
with:
|
with:
|
||||||
profile: minimal
|
profile: minimal
|
||||||
|
|
||||||
@@ -68,7 +68,7 @@ jobs:
|
|||||||
run: telegraf --config https://eu-central-1-1.aws.cloud2.influxdata.com/api/v2/telegrafs/08b52e34a370b000 --once --debug
|
run: telegraf --config https://eu-central-1-1.aws.cloud2.influxdata.com/api/v2/telegrafs/08b52e34a370b000 --once --debug
|
||||||
|
|
||||||
# Helper
|
# Helper
|
||||||
- name: "README: compare with another benchmark"
|
- name: 'README: compare with another benchmark'
|
||||||
run: |
|
run: |
|
||||||
echo "${{ steps.file.outputs.basename }}.json has just been pushed."
|
echo "${{ steps.file.outputs.basename }}.json has just been pushed."
|
||||||
echo 'How to compare this benchmark with another one?'
|
echo 'How to compare this benchmark with another one?'
|
||||||
|
|||||||
2
.github/workflows/check-valid-milestone.yml
vendored
2
.github/workflows/check-valid-milestone.yml
vendored
@@ -17,7 +17,7 @@ jobs:
|
|||||||
uses: actions/checkout@v3
|
uses: actions/checkout@v3
|
||||||
|
|
||||||
- name: Validate PR milestone
|
- name: Validate PR milestone
|
||||||
uses: actions/github-script@v7
|
uses: actions/github-script@v6
|
||||||
with:
|
with:
|
||||||
github-token: ${{ secrets.GITHUB_TOKEN }}
|
github-token: ${{ secrets.GITHUB_TOKEN }}
|
||||||
script: |
|
script: |
|
||||||
|
|||||||
2
.github/workflows/db-change-comments.yml
vendored
2
.github/workflows/db-change-comments.yml
vendored
@@ -44,7 +44,7 @@ jobs:
|
|||||||
if: github.event.label.name == 'db change'
|
if: github.event.label.name == 'db change'
|
||||||
steps:
|
steps:
|
||||||
- name: Add comment
|
- name: Add comment
|
||||||
uses: actions/github-script@v7
|
uses: actions/github-script@v6
|
||||||
with:
|
with:
|
||||||
github-token: ${{ secrets.GITHUB_TOKEN }}
|
github-token: ${{ secrets.GITHUB_TOKEN }}
|
||||||
script: |
|
script: |
|
||||||
|
|||||||
2
.github/workflows/db-change-missing.yml
vendored
2
.github/workflows/db-change-missing.yml
vendored
@@ -12,7 +12,7 @@ jobs:
|
|||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
steps:
|
steps:
|
||||||
- name: Checkout code
|
- name: Checkout code
|
||||||
uses: actions/checkout@v3
|
uses: actions/checkout@v2
|
||||||
- name: Check db change labels
|
- name: Check db change labels
|
||||||
id: check_labels
|
id: check_labels
|
||||||
run: |
|
run: |
|
||||||
|
|||||||
2
.github/workflows/flaky-tests.yml
vendored
2
.github/workflows/flaky-tests.yml
vendored
@@ -17,7 +17,7 @@ jobs:
|
|||||||
run: |
|
run: |
|
||||||
apt-get update && apt-get install -y curl
|
apt-get update && apt-get install -y curl
|
||||||
apt-get install build-essential -y
|
apt-get install build-essential -y
|
||||||
- uses: dtolnay/rust-toolchain@1.85
|
- uses: dtolnay/rust-toolchain@1.81
|
||||||
- name: Install cargo-flaky
|
- name: Install cargo-flaky
|
||||||
run: cargo install cargo-flaky
|
run: cargo install cargo-flaky
|
||||||
- name: Run cargo flaky in the dumps
|
- name: Run cargo flaky in the dumps
|
||||||
|
|||||||
2
.github/workflows/fuzzer-indexing.yml
vendored
2
.github/workflows/fuzzer-indexing.yml
vendored
@@ -12,7 +12,7 @@ jobs:
|
|||||||
timeout-minutes: 4320 # 72h
|
timeout-minutes: 4320 # 72h
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v3
|
- uses: actions/checkout@v3
|
||||||
- uses: dtolnay/rust-toolchain@1.85
|
- uses: dtolnay/rust-toolchain@1.81
|
||||||
with:
|
with:
|
||||||
profile: minimal
|
profile: minimal
|
||||||
|
|
||||||
|
|||||||
10
.github/workflows/milestone-workflow.yml
vendored
10
.github/workflows/milestone-workflow.yml
vendored
@@ -150,7 +150,6 @@ jobs:
|
|||||||
|
|
||||||
update-ruleset:
|
update-ruleset:
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
if: github.event.action == 'created'
|
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v3
|
- uses: actions/checkout@v3
|
||||||
- name: Install jq
|
- name: Install jq
|
||||||
@@ -163,18 +162,15 @@ jobs:
|
|||||||
RULESET_ID: 4253297
|
RULESET_ID: 4253297
|
||||||
BRANCH_NAME: ${{ github.event.inputs.branch_name }}
|
BRANCH_NAME: ${{ github.event.inputs.branch_name }}
|
||||||
run: |
|
run: |
|
||||||
echo "RULESET_ID: ${{ env.RULESET_ID }}"
|
|
||||||
echo "BRANCH_NAME: ${{ env.BRANCH_NAME }}"
|
|
||||||
|
|
||||||
# Get current ruleset conditions
|
# Get current ruleset conditions
|
||||||
CONDITIONS=$(gh api repos/meilisearch/meilisearch/rulesets/${{ env.RULESET_ID }} --jq '{ conditions: .conditions }')
|
CONDITIONS=$(gh api repos/meilisearch/meilisearch/rulesets/$RULESET_ID --jq '{ conditions: .conditions }')
|
||||||
|
|
||||||
# Update the conditions by appending the milestone version
|
# Update the conditions by appending the milestone version
|
||||||
UPDATED_CONDITIONS=$(echo $CONDITIONS | jq '.conditions.ref_name.include += ["refs/heads/release-'${{ env.MILESTONE_VERSION }}'"]')
|
UPDATED_CONDITIONS=$(echo $CONDITIONS | jq '.conditions.ref_name.include += ["refs/heads/release-'$MILESTONE_VERSION'"]')
|
||||||
|
|
||||||
# Update the ruleset from stdin (-)
|
# Update the ruleset from stdin (-)
|
||||||
echo $UPDATED_CONDITIONS |
|
echo $UPDATED_CONDITIONS |
|
||||||
gh api repos/meilisearch/meilisearch/rulesets/${{ env.RULESET_ID }} \
|
gh api repos/meilisearch/meilisearch/rulesets/$RULESET_ID \
|
||||||
--method PUT \
|
--method PUT \
|
||||||
-H "Accept: application/vnd.github+json" \
|
-H "Accept: application/vnd.github+json" \
|
||||||
-H "X-GitHub-Api-Version: 2022-11-28" \
|
-H "X-GitHub-Api-Version: 2022-11-28" \
|
||||||
|
|||||||
2
.github/workflows/publish-apt-brew-pkg.yml
vendored
2
.github/workflows/publish-apt-brew-pkg.yml
vendored
@@ -25,7 +25,7 @@ jobs:
|
|||||||
run: |
|
run: |
|
||||||
apt-get update && apt-get install -y curl
|
apt-get update && apt-get install -y curl
|
||||||
apt-get install build-essential -y
|
apt-get install build-essential -y
|
||||||
- uses: dtolnay/rust-toolchain@1.85
|
- uses: dtolnay/rust-toolchain@1.81
|
||||||
- name: Install cargo-deb
|
- name: Install cargo-deb
|
||||||
run: cargo install cargo-deb
|
run: cargo install cargo-deb
|
||||||
- uses: actions/checkout@v3
|
- uses: actions/checkout@v3
|
||||||
|
|||||||
8
.github/workflows/publish-binaries.yml
vendored
8
.github/workflows/publish-binaries.yml
vendored
@@ -45,7 +45,7 @@ jobs:
|
|||||||
run: |
|
run: |
|
||||||
apt-get update && apt-get install -y curl
|
apt-get update && apt-get install -y curl
|
||||||
apt-get install build-essential -y
|
apt-get install build-essential -y
|
||||||
- uses: dtolnay/rust-toolchain@1.85
|
- uses: dtolnay/rust-toolchain@1.81
|
||||||
- name: Build
|
- name: Build
|
||||||
run: cargo build --release --locked
|
run: cargo build --release --locked
|
||||||
# No need to upload binaries for dry run (cron)
|
# No need to upload binaries for dry run (cron)
|
||||||
@@ -75,7 +75,7 @@ jobs:
|
|||||||
asset_name: meilisearch-windows-amd64.exe
|
asset_name: meilisearch-windows-amd64.exe
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v3
|
- uses: actions/checkout@v3
|
||||||
- uses: dtolnay/rust-toolchain@1.85
|
- uses: dtolnay/rust-toolchain@1.81
|
||||||
- name: Build
|
- name: Build
|
||||||
run: cargo build --release --locked
|
run: cargo build --release --locked
|
||||||
# No need to upload binaries for dry run (cron)
|
# No need to upload binaries for dry run (cron)
|
||||||
@@ -101,7 +101,7 @@ jobs:
|
|||||||
- name: Checkout repository
|
- name: Checkout repository
|
||||||
uses: actions/checkout@v3
|
uses: actions/checkout@v3
|
||||||
- name: Installing Rust toolchain
|
- name: Installing Rust toolchain
|
||||||
uses: dtolnay/rust-toolchain@1.85
|
uses: dtolnay/rust-toolchain@1.81
|
||||||
with:
|
with:
|
||||||
profile: minimal
|
profile: minimal
|
||||||
target: ${{ matrix.target }}
|
target: ${{ matrix.target }}
|
||||||
@@ -148,7 +148,7 @@ jobs:
|
|||||||
add-apt-repository "deb [arch=$(dpkg --print-architecture)] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable"
|
add-apt-repository "deb [arch=$(dpkg --print-architecture)] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable"
|
||||||
apt-get update -y && apt-get install -y docker-ce
|
apt-get update -y && apt-get install -y docker-ce
|
||||||
- name: Installing Rust toolchain
|
- name: Installing Rust toolchain
|
||||||
uses: dtolnay/rust-toolchain@1.85
|
uses: dtolnay/rust-toolchain@1.81
|
||||||
with:
|
with:
|
||||||
profile: minimal
|
profile: minimal
|
||||||
target: ${{ matrix.target }}
|
target: ${{ matrix.target }}
|
||||||
|
|||||||
17
.github/workflows/publish-docker-images.yml
vendored
17
.github/workflows/publish-docker-images.yml
vendored
@@ -104,20 +104,3 @@ jobs:
|
|||||||
repository: meilisearch/meilisearch-cloud
|
repository: meilisearch/meilisearch-cloud
|
||||||
event-type: cloud-docker-build
|
event-type: cloud-docker-build
|
||||||
client-payload: '{ "meilisearch_version": "${{ github.ref_name }}", "stable": "${{ steps.check-tag-format.outputs.stable }}" }'
|
client-payload: '{ "meilisearch_version": "${{ github.ref_name }}", "stable": "${{ steps.check-tag-format.outputs.stable }}" }'
|
||||||
|
|
||||||
# Send notification to Swarmia to notify of a deployment: https://app.swarmia.com
|
|
||||||
- name: Send deployment to Swarmia
|
|
||||||
if: github.event_name == 'push' && success()
|
|
||||||
run: |
|
|
||||||
JSON_STRING=$( jq --null-input --compact-output \
|
|
||||||
--arg version "${{ github.ref_name }}" \
|
|
||||||
--arg appName "meilisearch" \
|
|
||||||
--arg environment "production" \
|
|
||||||
--arg commitSha "${{ github.sha }}" \
|
|
||||||
--arg repositoryFullName "${{ github.repository }}" \
|
|
||||||
'{"version": $version, "appName": $appName, "environment": $environment, "commitSha": $commitSha, "repositoryFullName": $repositoryFullName}' )
|
|
||||||
|
|
||||||
curl -H "Authorization: ${{ secrets.SWARMIA_DEPLOYMENTS_AUTHORIZATION }}" \
|
|
||||||
-H "Content-Type: application/json" \
|
|
||||||
-d "$JSON_STRING" \
|
|
||||||
https://hook.swarmia.com/deployments
|
|
||||||
|
|||||||
28
.github/workflows/sdks-tests.yml
vendored
28
.github/workflows/sdks-tests.yml
vendored
@@ -22,7 +22,7 @@ jobs:
|
|||||||
outputs:
|
outputs:
|
||||||
docker-image: ${{ steps.define-image.outputs.docker-image }}
|
docker-image: ${{ steps.define-image.outputs.docker-image }}
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v3
|
- uses: actions/checkout@v4
|
||||||
- name: Define the Docker image we need to use
|
- name: Define the Docker image we need to use
|
||||||
id: define-image
|
id: define-image
|
||||||
run: |
|
run: |
|
||||||
@@ -46,7 +46,7 @@ jobs:
|
|||||||
MEILISEARCH_VERSION: ${{ needs.define-docker-image.outputs.docker-image }}
|
MEILISEARCH_VERSION: ${{ needs.define-docker-image.outputs.docker-image }}
|
||||||
|
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v3
|
- uses: actions/checkout@v4
|
||||||
with:
|
with:
|
||||||
repository: meilisearch/meilisearch-dotnet
|
repository: meilisearch/meilisearch-dotnet
|
||||||
- name: Setup .NET Core
|
- name: Setup .NET Core
|
||||||
@@ -75,7 +75,7 @@ jobs:
|
|||||||
ports:
|
ports:
|
||||||
- '7700:7700'
|
- '7700:7700'
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v3
|
- uses: actions/checkout@v4
|
||||||
with:
|
with:
|
||||||
repository: meilisearch/meilisearch-dart
|
repository: meilisearch/meilisearch-dart
|
||||||
- uses: dart-lang/setup-dart@v1
|
- uses: dart-lang/setup-dart@v1
|
||||||
@@ -103,7 +103,7 @@ jobs:
|
|||||||
uses: actions/setup-go@v5
|
uses: actions/setup-go@v5
|
||||||
with:
|
with:
|
||||||
go-version: stable
|
go-version: stable
|
||||||
- uses: actions/checkout@v3
|
- uses: actions/checkout@v4
|
||||||
with:
|
with:
|
||||||
repository: meilisearch/meilisearch-go
|
repository: meilisearch/meilisearch-go
|
||||||
- name: Get dependencies
|
- name: Get dependencies
|
||||||
@@ -129,7 +129,7 @@ jobs:
|
|||||||
ports:
|
ports:
|
||||||
- '7700:7700'
|
- '7700:7700'
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v3
|
- uses: actions/checkout@v4
|
||||||
with:
|
with:
|
||||||
repository: meilisearch/meilisearch-java
|
repository: meilisearch/meilisearch-java
|
||||||
- name: Set up Java
|
- name: Set up Java
|
||||||
@@ -156,7 +156,7 @@ jobs:
|
|||||||
ports:
|
ports:
|
||||||
- '7700:7700'
|
- '7700:7700'
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v3
|
- uses: actions/checkout@v4
|
||||||
with:
|
with:
|
||||||
repository: meilisearch/meilisearch-js
|
repository: meilisearch/meilisearch-js
|
||||||
- name: Setup node
|
- name: Setup node
|
||||||
@@ -191,7 +191,7 @@ jobs:
|
|||||||
ports:
|
ports:
|
||||||
- '7700:7700'
|
- '7700:7700'
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v3
|
- uses: actions/checkout@v4
|
||||||
with:
|
with:
|
||||||
repository: meilisearch/meilisearch-php
|
repository: meilisearch/meilisearch-php
|
||||||
- name: Install PHP
|
- name: Install PHP
|
||||||
@@ -220,7 +220,7 @@ jobs:
|
|||||||
ports:
|
ports:
|
||||||
- '7700:7700'
|
- '7700:7700'
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v3
|
- uses: actions/checkout@v4
|
||||||
with:
|
with:
|
||||||
repository: meilisearch/meilisearch-python
|
repository: meilisearch/meilisearch-python
|
||||||
- name: Set up Python
|
- name: Set up Python
|
||||||
@@ -245,7 +245,7 @@ jobs:
|
|||||||
ports:
|
ports:
|
||||||
- '7700:7700'
|
- '7700:7700'
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v3
|
- uses: actions/checkout@v4
|
||||||
with:
|
with:
|
||||||
repository: meilisearch/meilisearch-ruby
|
repository: meilisearch/meilisearch-ruby
|
||||||
- name: Set up Ruby 3
|
- name: Set up Ruby 3
|
||||||
@@ -270,7 +270,7 @@ jobs:
|
|||||||
ports:
|
ports:
|
||||||
- '7700:7700'
|
- '7700:7700'
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v3
|
- uses: actions/checkout@v4
|
||||||
with:
|
with:
|
||||||
repository: meilisearch/meilisearch-rust
|
repository: meilisearch/meilisearch-rust
|
||||||
- name: Build
|
- name: Build
|
||||||
@@ -291,7 +291,7 @@ jobs:
|
|||||||
ports:
|
ports:
|
||||||
- '7700:7700'
|
- '7700:7700'
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v3
|
- uses: actions/checkout@v4
|
||||||
with:
|
with:
|
||||||
repository: meilisearch/meilisearch-swift
|
repository: meilisearch/meilisearch-swift
|
||||||
- name: Run tests
|
- name: Run tests
|
||||||
@@ -314,7 +314,7 @@ jobs:
|
|||||||
ports:
|
ports:
|
||||||
- '7700:7700'
|
- '7700:7700'
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v3
|
- uses: actions/checkout@v4
|
||||||
with:
|
with:
|
||||||
repository: meilisearch/meilisearch-js-plugins
|
repository: meilisearch/meilisearch-js-plugins
|
||||||
- name: Setup node
|
- name: Setup node
|
||||||
@@ -345,7 +345,7 @@ jobs:
|
|||||||
ports:
|
ports:
|
||||||
- '7700:7700'
|
- '7700:7700'
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v3
|
- uses: actions/checkout@v4
|
||||||
with:
|
with:
|
||||||
repository: meilisearch/meilisearch-rails
|
repository: meilisearch/meilisearch-rails
|
||||||
- name: Set up Ruby 3
|
- name: Set up Ruby 3
|
||||||
@@ -369,7 +369,7 @@ jobs:
|
|||||||
ports:
|
ports:
|
||||||
- '7700:7700'
|
- '7700:7700'
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v3
|
- uses: actions/checkout@v4
|
||||||
with:
|
with:
|
||||||
repository: meilisearch/meilisearch-symfony
|
repository: meilisearch/meilisearch-symfony
|
||||||
- name: Install PHP
|
- name: Install PHP
|
||||||
|
|||||||
28
.github/workflows/test-suite.yml
vendored
28
.github/workflows/test-suite.yml
vendored
@@ -21,15 +21,15 @@ jobs:
|
|||||||
# Use ubuntu-22.04 to compile with glibc 2.35
|
# Use ubuntu-22.04 to compile with glibc 2.35
|
||||||
image: ubuntu:22.04
|
image: ubuntu:22.04
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v3
|
- uses: actions/checkout@v4
|
||||||
- name: Install needed dependencies
|
- name: Install needed dependencies
|
||||||
run: |
|
run: |
|
||||||
apt-get update && apt-get install -y curl
|
apt-get update && apt-get install -y curl
|
||||||
apt-get install build-essential -y
|
apt-get install build-essential -y
|
||||||
- name: Setup test with Rust stable
|
- name: Setup test with Rust stable
|
||||||
uses: dtolnay/rust-toolchain@1.85
|
uses: dtolnay/rust-toolchain@1.81
|
||||||
- name: Cache dependencies
|
- name: Cache dependencies
|
||||||
uses: Swatinem/rust-cache@v2.7.8
|
uses: Swatinem/rust-cache@v2.7.7
|
||||||
- name: Run cargo check without any default features
|
- name: Run cargo check without any default features
|
||||||
uses: actions-rs/cargo@v1
|
uses: actions-rs/cargo@v1
|
||||||
with:
|
with:
|
||||||
@@ -51,8 +51,8 @@ jobs:
|
|||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v3
|
- uses: actions/checkout@v3
|
||||||
- name: Cache dependencies
|
- name: Cache dependencies
|
||||||
uses: Swatinem/rust-cache@v2.7.8
|
uses: Swatinem/rust-cache@v2.7.7
|
||||||
- uses: dtolnay/rust-toolchain@1.85
|
- uses: dtolnay/rust-toolchain@1.81
|
||||||
- name: Run cargo check without any default features
|
- name: Run cargo check without any default features
|
||||||
uses: actions-rs/cargo@v1
|
uses: actions-rs/cargo@v1
|
||||||
with:
|
with:
|
||||||
@@ -77,7 +77,7 @@ jobs:
|
|||||||
run: |
|
run: |
|
||||||
apt-get update
|
apt-get update
|
||||||
apt-get install --assume-yes build-essential curl
|
apt-get install --assume-yes build-essential curl
|
||||||
- uses: dtolnay/rust-toolchain@1.85
|
- uses: dtolnay/rust-toolchain@1.81
|
||||||
- name: Run cargo build with almost all features
|
- name: Run cargo build with almost all features
|
||||||
run: |
|
run: |
|
||||||
cargo build --workspace --locked --release --features "$(cargo xtask list-features --exclude-feature cuda,test-ollama)"
|
cargo build --workspace --locked --release --features "$(cargo xtask list-features --exclude-feature cuda,test-ollama)"
|
||||||
@@ -91,7 +91,7 @@ jobs:
|
|||||||
env:
|
env:
|
||||||
MEILI_TEST_OLLAMA_SERVER: "http://localhost:11434"
|
MEILI_TEST_OLLAMA_SERVER: "http://localhost:11434"
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v3
|
- uses: actions/checkout@v1
|
||||||
- name: Install Ollama
|
- name: Install Ollama
|
||||||
run: |
|
run: |
|
||||||
curl -fsSL https://ollama.com/install.sh | sudo -E sh
|
curl -fsSL https://ollama.com/install.sh | sudo -E sh
|
||||||
@@ -129,7 +129,7 @@ jobs:
|
|||||||
run: |
|
run: |
|
||||||
apt-get update
|
apt-get update
|
||||||
apt-get install --assume-yes build-essential curl
|
apt-get install --assume-yes build-essential curl
|
||||||
- uses: dtolnay/rust-toolchain@1.85
|
- uses: dtolnay/rust-toolchain@1.81
|
||||||
- name: Run cargo tree without default features and check lindera is not present
|
- name: Run cargo tree without default features and check lindera is not present
|
||||||
run: |
|
run: |
|
||||||
if cargo tree -f '{p} {f}' -e normal --no-default-features | grep -qz lindera; then
|
if cargo tree -f '{p} {f}' -e normal --no-default-features | grep -qz lindera; then
|
||||||
@@ -153,9 +153,9 @@ jobs:
|
|||||||
run: |
|
run: |
|
||||||
apt-get update && apt-get install -y curl
|
apt-get update && apt-get install -y curl
|
||||||
apt-get install build-essential -y
|
apt-get install build-essential -y
|
||||||
- uses: dtolnay/rust-toolchain@1.85
|
- uses: dtolnay/rust-toolchain@1.81
|
||||||
- name: Cache dependencies
|
- name: Cache dependencies
|
||||||
uses: Swatinem/rust-cache@v2.7.8
|
uses: Swatinem/rust-cache@v2.7.7
|
||||||
- name: Run tests in debug
|
- name: Run tests in debug
|
||||||
uses: actions-rs/cargo@v1
|
uses: actions-rs/cargo@v1
|
||||||
with:
|
with:
|
||||||
@@ -167,12 +167,12 @@ jobs:
|
|||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v3
|
- uses: actions/checkout@v3
|
||||||
- uses: dtolnay/rust-toolchain@1.85
|
- uses: dtolnay/rust-toolchain@1.81
|
||||||
with:
|
with:
|
||||||
profile: minimal
|
profile: minimal
|
||||||
components: clippy
|
components: clippy
|
||||||
- name: Cache dependencies
|
- name: Cache dependencies
|
||||||
uses: Swatinem/rust-cache@v2.7.8
|
uses: Swatinem/rust-cache@v2.7.7
|
||||||
- name: Run cargo clippy
|
- name: Run cargo clippy
|
||||||
uses: actions-rs/cargo@v1
|
uses: actions-rs/cargo@v1
|
||||||
with:
|
with:
|
||||||
@@ -184,14 +184,14 @@ jobs:
|
|||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v3
|
- uses: actions/checkout@v3
|
||||||
- uses: dtolnay/rust-toolchain@1.85
|
- uses: dtolnay/rust-toolchain@1.81
|
||||||
with:
|
with:
|
||||||
profile: minimal
|
profile: minimal
|
||||||
toolchain: nightly-2024-07-09
|
toolchain: nightly-2024-07-09
|
||||||
override: true
|
override: true
|
||||||
components: rustfmt
|
components: rustfmt
|
||||||
- name: Cache dependencies
|
- name: Cache dependencies
|
||||||
uses: Swatinem/rust-cache@v2.7.8
|
uses: Swatinem/rust-cache@v2.7.7
|
||||||
- name: Run cargo fmt
|
- name: Run cargo fmt
|
||||||
# Since we never ran the `build.rs` script in the benchmark directory we are missing one auto-generated import file.
|
# Since we never ran the `build.rs` script in the benchmark directory we are missing one auto-generated import file.
|
||||||
# Since we want to trigger (and fail) this action as fast as possible, instead of building the benchmark crate
|
# Since we want to trigger (and fail) this action as fast as possible, instead of building the benchmark crate
|
||||||
|
|||||||
@@ -4,7 +4,7 @@ on:
|
|||||||
workflow_dispatch:
|
workflow_dispatch:
|
||||||
inputs:
|
inputs:
|
||||||
new_version:
|
new_version:
|
||||||
description: "The new version (vX.Y.Z)"
|
description: 'The new version (vX.Y.Z)'
|
||||||
required: true
|
required: true
|
||||||
|
|
||||||
env:
|
env:
|
||||||
@@ -18,7 +18,7 @@ jobs:
|
|||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v3
|
- uses: actions/checkout@v3
|
||||||
- uses: dtolnay/rust-toolchain@1.85
|
- uses: dtolnay/rust-toolchain@1.81
|
||||||
with:
|
with:
|
||||||
profile: minimal
|
profile: minimal
|
||||||
- name: Install sd
|
- name: Install sd
|
||||||
|
|||||||
771
Cargo.lock
generated
771
Cargo.lock
generated
File diff suppressed because it is too large
Load Diff
@@ -22,7 +22,7 @@ members = [
|
|||||||
]
|
]
|
||||||
|
|
||||||
[workspace.package]
|
[workspace.package]
|
||||||
version = "1.15.0"
|
version = "1.14.0"
|
||||||
authors = [
|
authors = [
|
||||||
"Quentin de Quelen <quentin@dequelen.me>",
|
"Quentin de Quelen <quentin@dequelen.me>",
|
||||||
"Clément Renault <clement@meilisearch.com>",
|
"Clément Renault <clement@meilisearch.com>",
|
||||||
|
|||||||
@@ -1,5 +1,5 @@
|
|||||||
# Compile
|
# Compile
|
||||||
FROM rust:1.85-alpine3.20 AS compiler
|
FROM rust:1.81.0-alpine3.20 AS compiler
|
||||||
|
|
||||||
RUN apk add -q --no-cache build-base openssl-dev
|
RUN apk add -q --no-cache build-base openssl-dev
|
||||||
|
|
||||||
|
|||||||
10
README.md
10
README.md
@@ -23,6 +23,12 @@
|
|||||||
<a href="https://github.com/meilisearch/meilisearch/queue"><img alt="Merge Queues enabled" src="https://img.shields.io/badge/Merge_Queues-enabled-%2357cf60?logo=github"></a>
|
<a href="https://github.com/meilisearch/meilisearch/queue"><img alt="Merge Queues enabled" src="https://img.shields.io/badge/Merge_Queues-enabled-%2357cf60?logo=github"></a>
|
||||||
</p>
|
</p>
|
||||||
|
|
||||||
|
<p align="center" name="ph-banner">
|
||||||
|
<a href="https://www.producthunt.com/posts/meilisearch-ai">
|
||||||
|
<img src="assets/ph-banner.png" alt="Meilisearch AI-powered search general availability announcement on ProductHunt">
|
||||||
|
</a>
|
||||||
|
</p>
|
||||||
|
|
||||||
<p align="center">⚡ A lightning-fast search engine that fits effortlessly into your apps, websites, and workflow 🔍</p>
|
<p align="center">⚡ A lightning-fast search engine that fits effortlessly into your apps, websites, and workflow 🔍</p>
|
||||||
|
|
||||||
[Meilisearch](https://www.meilisearch.com?utm_campaign=oss&utm_source=github&utm_medium=meilisearch&utm_content=intro) helps you shape a delightful search experience in a snap, offering features that work out of the box to speed up your workflow.
|
[Meilisearch](https://www.meilisearch.com?utm_campaign=oss&utm_source=github&utm_medium=meilisearch&utm_content=intro) helps you shape a delightful search experience in a snap, offering features that work out of the box to speed up your workflow.
|
||||||
@@ -41,7 +47,7 @@
|
|||||||
- [**Movies**](https://where2watch.meilisearch.com/?utm_campaign=oss&utm_source=github&utm_medium=organization) — An application to help you find streaming platforms to watch movies using [hybrid search](https://www.meilisearch.com/solutions/hybrid-search?utm_campaign=oss&utm_source=github&utm_medium=meilisearch&utm_content=demos).
|
- [**Movies**](https://where2watch.meilisearch.com/?utm_campaign=oss&utm_source=github&utm_medium=organization) — An application to help you find streaming platforms to watch movies using [hybrid search](https://www.meilisearch.com/solutions/hybrid-search?utm_campaign=oss&utm_source=github&utm_medium=meilisearch&utm_content=demos).
|
||||||
- [**Ecommerce**](https://ecommerce.meilisearch.com/?utm_campaign=oss&utm_source=github&utm_medium=meilisearch&utm_content=demos) — Ecommerce website using disjunctive [facets](https://www.meilisearch.com/docs/learn/fine_tuning_results/faceted_search?utm_campaign=oss&utm_source=github&utm_medium=meilisearch&utm_content=demos), range and rating filtering, and pagination.
|
- [**Ecommerce**](https://ecommerce.meilisearch.com/?utm_campaign=oss&utm_source=github&utm_medium=meilisearch&utm_content=demos) — Ecommerce website using disjunctive [facets](https://www.meilisearch.com/docs/learn/fine_tuning_results/faceted_search?utm_campaign=oss&utm_source=github&utm_medium=meilisearch&utm_content=demos), range and rating filtering, and pagination.
|
||||||
- [**Songs**](https://music.meilisearch.com/?utm_campaign=oss&utm_source=github&utm_medium=meilisearch&utm_content=demos) — Search through 47 million of songs.
|
- [**Songs**](https://music.meilisearch.com/?utm_campaign=oss&utm_source=github&utm_medium=meilisearch&utm_content=demos) — Search through 47 million of songs.
|
||||||
- [**SaaS**](https://saas.meilisearch.com/?utm_campaign=oss&utm_source=github&utm_medium=meilisearch&utm_content=demos) — Search for contacts, deals, and companies in this [multi-tenant](https://www.meilisearch.com/docs/learn/security/multitenancy_tenant_tokens?utm_campaign=oss&utm_source=github&utm_medium=meilisearch&utm_content=demos) CRM application.
|
- [**SaaS**](https://saas.meilisearch.com/?utm_campaign=oss&utm_source=github&utm_medium=meilisearch&utm_content=demos) — Search for contacts, deals, and companies in this [multi-tenant](https://www.meilisearch.com/docs/learn/security/multitenancy_tenant_tokens?utm_campaign=oss&utm_source=github&utm_medium=meilisearch&utm_content=demos) CRM application.
|
||||||
|
|
||||||
See the list of all our example apps in our [demos repository](https://github.com/meilisearch/demos).
|
See the list of all our example apps in our [demos repository](https://github.com/meilisearch/demos).
|
||||||
|
|
||||||
@@ -99,7 +105,7 @@ If you want to know more about the kind of data we collect and what we use it fo
|
|||||||
|
|
||||||
## 📫 Get in touch!
|
## 📫 Get in touch!
|
||||||
|
|
||||||
Meilisearch is a search engine created by [Meili](https://www.meilisearch.com/careers), a software development company headquartered in France and with team members all over the world. Want to know more about us? [Check out our blog!](https://blog.meilisearch.com/?utm_campaign=oss&utm_source=github&utm_medium=meilisearch&utm_content=contact)
|
Meilisearch is a search engine created by [Meili]([https://www.welcometothejungle.com/en/companies/meilisearch](https://www.meilisearch.com/careers)), a software development company headquartered in France and with team members all over the world. Want to know more about us? [Check out our blog!](https://blog.meilisearch.com/?utm_campaign=oss&utm_source=github&utm_medium=meilisearch&utm_content=contact)
|
||||||
|
|
||||||
🗞 [Subscribe to our newsletter](https://meilisearch.us2.list-manage.com/subscribe?u=27870f7b71c908a8b359599fb&id=79582d828e) if you don't want to miss any updates! We promise we won't clutter your mailbox: we only send one edition every two months.
|
🗞 [Subscribe to our newsletter](https://meilisearch.us2.list-manage.com/subscribe?u=27870f7b71c908a8b359599fb&id=79582d828e) if you don't want to miss any updates! We promise we won't clutter your mailbox: we only send one edition every two months.
|
||||||
|
|
||||||
|
|||||||
@@ -1501,300 +1501,6 @@
|
|||||||
"title": "Task queue latency",
|
"title": "Task queue latency",
|
||||||
"type": "timeseries"
|
"type": "timeseries"
|
||||||
},
|
},
|
||||||
{
|
|
||||||
"datasource": {
|
|
||||||
"type": "prometheus",
|
|
||||||
"uid": "${DS_PROMETHEUS}"
|
|
||||||
},
|
|
||||||
"fieldConfig": {
|
|
||||||
"defaults": {
|
|
||||||
"color": {
|
|
||||||
"mode": "palette-classic"
|
|
||||||
},
|
|
||||||
"custom": {
|
|
||||||
"axisBorderShow": false,
|
|
||||||
"axisCenteredZero": false,
|
|
||||||
"axisColorMode": "text",
|
|
||||||
"axisLabel": "",
|
|
||||||
"axisPlacement": "auto",
|
|
||||||
"barAlignment": 0,
|
|
||||||
"drawStyle": "line",
|
|
||||||
"fillOpacity": 15,
|
|
||||||
"gradientMode": "none",
|
|
||||||
"hideFrom": {
|
|
||||||
"legend": false,
|
|
||||||
"tooltip": false,
|
|
||||||
"viz": false
|
|
||||||
},
|
|
||||||
"insertNulls": false,
|
|
||||||
"lineInterpolation": "linear",
|
|
||||||
"lineWidth": 1,
|
|
||||||
"pointSize": 5,
|
|
||||||
"scaleDistribution": {
|
|
||||||
"type": "linear"
|
|
||||||
},
|
|
||||||
"showPoints": "never",
|
|
||||||
"spanNulls": false,
|
|
||||||
"stacking": {
|
|
||||||
"group": "A",
|
|
||||||
"mode": "none"
|
|
||||||
},
|
|
||||||
"thresholdsStyle": {
|
|
||||||
"mode": "off"
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"mappings": [],
|
|
||||||
"thresholds": {
|
|
||||||
"mode": "absolute",
|
|
||||||
"steps": [
|
|
||||||
{
|
|
||||||
"color": "green"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"color": "red",
|
|
||||||
"value": 80
|
|
||||||
}
|
|
||||||
]
|
|
||||||
},
|
|
||||||
"unit": "none"
|
|
||||||
},
|
|
||||||
"overrides": []
|
|
||||||
},
|
|
||||||
"gridPos": {
|
|
||||||
"h": 11,
|
|
||||||
"w": 12,
|
|
||||||
"x": 12,
|
|
||||||
"y": 51
|
|
||||||
},
|
|
||||||
"id": 29,
|
|
||||||
"interval": "5s",
|
|
||||||
"options": {
|
|
||||||
"legend": {
|
|
||||||
"calcs": [],
|
|
||||||
"displayMode": "list",
|
|
||||||
"placement": "right",
|
|
||||||
"showLegend": true
|
|
||||||
},
|
|
||||||
"tooltip": {
|
|
||||||
"mode": "single",
|
|
||||||
"sort": "none"
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"pluginVersion": "8.1.4",
|
|
||||||
"targets": [
|
|
||||||
{
|
|
||||||
"datasource": {
|
|
||||||
"type": "prometheus",
|
|
||||||
"uid": "${DS_PROMETHEUS}"
|
|
||||||
},
|
|
||||||
"editorMode": "builder",
|
|
||||||
"exemplar": true,
|
|
||||||
"expr": "meilisearch_task_queue_used_size{instance=\"$instance\", job=\"$job\"}",
|
|
||||||
"interval": "",
|
|
||||||
"legendFormat": "{{value}} ",
|
|
||||||
"range": true,
|
|
||||||
"refId": "A"
|
|
||||||
}
|
|
||||||
],
|
|
||||||
"title": "Task queue used size in bytes",
|
|
||||||
"type": "timeseries"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"datasource": {
|
|
||||||
"type": "prometheus",
|
|
||||||
"uid": "${DS_PROMETHEUS}"
|
|
||||||
},
|
|
||||||
"fieldConfig": {
|
|
||||||
"defaults": {
|
|
||||||
"color": {
|
|
||||||
"mode": "palette-classic"
|
|
||||||
},
|
|
||||||
"custom": {
|
|
||||||
"axisBorderShow": false,
|
|
||||||
"axisCenteredZero": false,
|
|
||||||
"axisColorMode": "text",
|
|
||||||
"axisLabel": "",
|
|
||||||
"axisPlacement": "auto",
|
|
||||||
"barAlignment": 0,
|
|
||||||
"drawStyle": "line",
|
|
||||||
"fillOpacity": 15,
|
|
||||||
"gradientMode": "none",
|
|
||||||
"hideFrom": {
|
|
||||||
"legend": false,
|
|
||||||
"tooltip": false,
|
|
||||||
"viz": false
|
|
||||||
},
|
|
||||||
"insertNulls": false,
|
|
||||||
"lineInterpolation": "linear",
|
|
||||||
"lineWidth": 1,
|
|
||||||
"pointSize": 5,
|
|
||||||
"scaleDistribution": {
|
|
||||||
"type": "linear"
|
|
||||||
},
|
|
||||||
"showPoints": "never",
|
|
||||||
"spanNulls": false,
|
|
||||||
"stacking": {
|
|
||||||
"group": "A",
|
|
||||||
"mode": "none"
|
|
||||||
},
|
|
||||||
"thresholdsStyle": {
|
|
||||||
"mode": "off"
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"mappings": [],
|
|
||||||
"thresholds": {
|
|
||||||
"mode": "absolute",
|
|
||||||
"steps": [
|
|
||||||
{
|
|
||||||
"color": "green"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"color": "red",
|
|
||||||
"value": 80
|
|
||||||
}
|
|
||||||
]
|
|
||||||
},
|
|
||||||
"unit": "none"
|
|
||||||
},
|
|
||||||
"overrides": []
|
|
||||||
},
|
|
||||||
"gridPos": {
|
|
||||||
"h": 11,
|
|
||||||
"w": 12,
|
|
||||||
"x": 12,
|
|
||||||
"y": 51
|
|
||||||
},
|
|
||||||
"id": 29,
|
|
||||||
"interval": "5s",
|
|
||||||
"options": {
|
|
||||||
"legend": {
|
|
||||||
"calcs": [],
|
|
||||||
"displayMode": "list",
|
|
||||||
"placement": "right",
|
|
||||||
"showLegend": true
|
|
||||||
},
|
|
||||||
"tooltip": {
|
|
||||||
"mode": "single",
|
|
||||||
"sort": "none"
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"pluginVersion": "8.1.4",
|
|
||||||
"targets": [
|
|
||||||
{
|
|
||||||
"datasource": {
|
|
||||||
"type": "prometheus",
|
|
||||||
"uid": "${DS_PROMETHEUS}"
|
|
||||||
},
|
|
||||||
"editorMode": "builder",
|
|
||||||
"exemplar": true,
|
|
||||||
"expr": "meilisearch_task_queue_size_until_stop_registering{instance=\"$instance\", job=\"$job\"}",
|
|
||||||
"interval": "",
|
|
||||||
"legendFormat": "{{value}} ",
|
|
||||||
"range": true,
|
|
||||||
"refId": "A"
|
|
||||||
}
|
|
||||||
],
|
|
||||||
"title": "Task queue available size until it stop receiving tasks.",
|
|
||||||
"type": "timeseries"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"datasource": {
|
|
||||||
"type": "prometheus",
|
|
||||||
"uid": "${DS_PROMETHEUS}"
|
|
||||||
},
|
|
||||||
"fieldConfig": {
|
|
||||||
"defaults": {
|
|
||||||
"color": {
|
|
||||||
"mode": "palette-classic"
|
|
||||||
},
|
|
||||||
"custom": {
|
|
||||||
"axisBorderShow": false,
|
|
||||||
"axisCenteredZero": false,
|
|
||||||
"axisColorMode": "text",
|
|
||||||
"axisLabel": "",
|
|
||||||
"axisPlacement": "auto",
|
|
||||||
"barAlignment": 0,
|
|
||||||
"drawStyle": "line",
|
|
||||||
"fillOpacity": 15,
|
|
||||||
"gradientMode": "none",
|
|
||||||
"hideFrom": {
|
|
||||||
"legend": false,
|
|
||||||
"tooltip": false,
|
|
||||||
"viz": false
|
|
||||||
},
|
|
||||||
"insertNulls": false,
|
|
||||||
"lineInterpolation": "linear",
|
|
||||||
"lineWidth": 1,
|
|
||||||
"pointSize": 5,
|
|
||||||
"scaleDistribution": {
|
|
||||||
"type": "linear"
|
|
||||||
},
|
|
||||||
"showPoints": "never",
|
|
||||||
"spanNulls": false,
|
|
||||||
"stacking": {
|
|
||||||
"group": "A",
|
|
||||||
"mode": "none"
|
|
||||||
},
|
|
||||||
"thresholdsStyle": {
|
|
||||||
"mode": "off"
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"mappings": [],
|
|
||||||
"thresholds": {
|
|
||||||
"mode": "absolute",
|
|
||||||
"steps": [
|
|
||||||
{
|
|
||||||
"color": "green"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"color": "red",
|
|
||||||
"value": 80
|
|
||||||
}
|
|
||||||
]
|
|
||||||
},
|
|
||||||
"unit": "none"
|
|
||||||
},
|
|
||||||
"overrides": []
|
|
||||||
},
|
|
||||||
"gridPos": {
|
|
||||||
"h": 11,
|
|
||||||
"w": 12,
|
|
||||||
"x": 12,
|
|
||||||
"y": 51
|
|
||||||
},
|
|
||||||
"id": 29,
|
|
||||||
"interval": "5s",
|
|
||||||
"options": {
|
|
||||||
"legend": {
|
|
||||||
"calcs": [],
|
|
||||||
"displayMode": "list",
|
|
||||||
"placement": "right",
|
|
||||||
"showLegend": true
|
|
||||||
},
|
|
||||||
"tooltip": {
|
|
||||||
"mode": "single",
|
|
||||||
"sort": "none"
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"pluginVersion": "8.1.4",
|
|
||||||
"targets": [
|
|
||||||
{
|
|
||||||
"datasource": {
|
|
||||||
"type": "prometheus",
|
|
||||||
"uid": "${DS_PROMETHEUS}"
|
|
||||||
},
|
|
||||||
"editorMode": "builder",
|
|
||||||
"exemplar": true,
|
|
||||||
"expr": "meilisearch_task_queue_max_size{instance=\"$instance\", job=\"$job\"}",
|
|
||||||
"interval": "",
|
|
||||||
"legendFormat": "{{value}} ",
|
|
||||||
"range": true,
|
|
||||||
"refId": "A"
|
|
||||||
}
|
|
||||||
],
|
|
||||||
"title": "Task queue maximum possible size",
|
|
||||||
"type": "stat"
|
|
||||||
},
|
|
||||||
{
|
{
|
||||||
"collapsed": true,
|
"collapsed": true,
|
||||||
"datasource": {
|
"datasource": {
|
||||||
|
|||||||
@@ -31,7 +31,7 @@ anyhow = "1.0.95"
|
|||||||
bytes = "1.9.0"
|
bytes = "1.9.0"
|
||||||
convert_case = "0.6.0"
|
convert_case = "0.6.0"
|
||||||
flate2 = "1.0.35"
|
flate2 = "1.0.35"
|
||||||
reqwest = { version = "0.12.15", features = ["blocking", "rustls-tls"], default-features = false }
|
reqwest = { version = "0.12.12", features = ["blocking", "rustls-tls"], default-features = false }
|
||||||
|
|
||||||
[features]
|
[features]
|
||||||
default = ["milli/all-tokenizations"]
|
default = ["milli/all-tokenizations"]
|
||||||
|
|||||||
@@ -237,7 +237,7 @@ pub(crate) mod test {
|
|||||||
use meilisearch_types::milli::{self, FilterableAttributesRule};
|
use meilisearch_types::milli::{self, FilterableAttributesRule};
|
||||||
use meilisearch_types::settings::{Checked, FacetingSettings, Settings};
|
use meilisearch_types::settings::{Checked, FacetingSettings, Settings};
|
||||||
use meilisearch_types::task_view::DetailsView;
|
use meilisearch_types::task_view::DetailsView;
|
||||||
use meilisearch_types::tasks::{BatchStopReason, Details, Kind, Status};
|
use meilisearch_types::tasks::{Details, Kind, Status};
|
||||||
use serde_json::{json, Map, Value};
|
use serde_json::{json, Map, Value};
|
||||||
use time::macros::datetime;
|
use time::macros::datetime;
|
||||||
use uuid::Uuid;
|
use uuid::Uuid;
|
||||||
@@ -326,7 +326,6 @@ pub(crate) mod test {
|
|||||||
index_uids: maplit::btreemap! { "doggo".to_string() => 1 },
|
index_uids: maplit::btreemap! { "doggo".to_string() => 1 },
|
||||||
progress_trace: Default::default(),
|
progress_trace: Default::default(),
|
||||||
write_channel_congestion: None,
|
write_channel_congestion: None,
|
||||||
internal_database_sizes: Default::default(),
|
|
||||||
},
|
},
|
||||||
enqueued_at: Some(BatchEnqueuedAt {
|
enqueued_at: Some(BatchEnqueuedAt {
|
||||||
earliest: datetime!(2022-11-11 0:00 UTC),
|
earliest: datetime!(2022-11-11 0:00 UTC),
|
||||||
@@ -334,7 +333,6 @@ pub(crate) mod test {
|
|||||||
}),
|
}),
|
||||||
started_at: datetime!(2022-11-20 0:00 UTC),
|
started_at: datetime!(2022-11-20 0:00 UTC),
|
||||||
finished_at: Some(datetime!(2022-11-21 0:00 UTC)),
|
finished_at: Some(datetime!(2022-11-21 0:00 UTC)),
|
||||||
stop_reason: BatchStopReason::Unspecified.to_string(),
|
|
||||||
}]
|
}]
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -373,7 +373,6 @@ impl<T> From<v5::Settings<T>> for v6::Settings<v6::Unchecked> {
|
|||||||
},
|
},
|
||||||
disable_on_words: typo.disable_on_words.into(),
|
disable_on_words: typo.disable_on_words.into(),
|
||||||
disable_on_attributes: typo.disable_on_attributes.into(),
|
disable_on_attributes: typo.disable_on_attributes.into(),
|
||||||
disable_on_numbers: v6::Setting::NotSet,
|
|
||||||
}),
|
}),
|
||||||
v5::Setting::Reset => v6::Setting::Reset,
|
v5::Setting::Reset => v6::Setting::Reset,
|
||||||
v5::Setting::NotSet => v6::Setting::NotSet,
|
v5::Setting::NotSet => v6::Setting::NotSet,
|
||||||
@@ -398,7 +397,6 @@ impl<T> From<v5::Settings<T>> for v6::Settings<v6::Unchecked> {
|
|||||||
search_cutoff_ms: v6::Setting::NotSet,
|
search_cutoff_ms: v6::Setting::NotSet,
|
||||||
facet_search: v6::Setting::NotSet,
|
facet_search: v6::Setting::NotSet,
|
||||||
prefix_search: v6::Setting::NotSet,
|
prefix_search: v6::Setting::NotSet,
|
||||||
chat: v6::Setting::NotSet,
|
|
||||||
_kind: std::marker::PhantomData,
|
_kind: std::marker::PhantomData,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -108,7 +108,7 @@ where
/// not supported on untagged enums.
struct StarOrVisitor<T>(PhantomData<T>);

-impl<T, FE> Visitor<'_> for StarOrVisitor<T>
+impl<'de, T, FE> Visitor<'de> for StarOrVisitor<T>
where
T: FromStr<Err = FE>,
FE: Display,

@@ -99,7 +99,7 @@ impl Task {
/// Return true when a task is finished.
/// A task is finished when its last state is either `Succeeded` or `Failed`.
pub fn is_finished(&self) -> bool {
-self.events.last().is_some_and(|event| {
+self.events.last().map_or(false, |event| {
matches!(event, TaskEvent::Succeded { .. } | TaskEvent::Failed { .. })
})
}

@@ -108,7 +108,7 @@ where
/// not supported on untagged enums.
struct StarOrVisitor<T>(PhantomData<T>);

-impl<T, FE> Visitor<'_> for StarOrVisitor<T>
+impl<'de, T, FE> Visitor<'de> for StarOrVisitor<T>
where
T: FromStr<Err = FE>,
FE: Display,

@@ -114,7 +114,7 @@ impl Task {
/// Return true when a task is finished.
/// A task is finished when its last state is either `Succeeded` or `Failed`.
pub fn is_finished(&self) -> bool {
-self.events.last().is_some_and(|event| {
+self.events.last().map_or(false, |event| {
matches!(event, TaskEvent::Succeeded { .. } | TaskEvent::Failed { .. })
})
}
@@ -35,7 +35,7 @@ impl<E> NomErrorExt<E> for nom::Err<E> {
pub fn cut_with_err<'a, O>(
mut parser: impl FnMut(Span<'a>) -> IResult<'a, O>,
mut with: impl FnMut(Error<'a>) -> Error<'a>,
-) -> impl FnMut(Span<'a>) -> IResult<'a, O> {
+) -> impl FnMut(Span<'a>) -> IResult<O> {
move |input| match parser.parse(input) {
Err(nom::Err::Error(e)) => Err(nom::Err::Failure(with(e))),
rest => rest,
@@ -121,7 +121,7 @@ impl<'a> ParseError<Span<'a>> for Error<'a> {
}
}

-impl Display for Error<'_> {
+impl<'a> Display for Error<'a> {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let input = self.context.fragment();
// When printing our error message we want to escape all `\n` to be sure we keep our format with the

@@ -80,7 +80,7 @@ pub struct Token<'a> {
value: Option<String>,
}

-impl PartialEq for Token<'_> {
+impl<'a> PartialEq for Token<'a> {
fn eq(&self, other: &Self) -> bool {
self.span.fragment() == other.span.fragment()
}
@@ -226,7 +226,7 @@ impl<'a> FilterCondition<'a> {
}
}

-pub fn parse(input: &'a str) -> Result<Option<Self>, Error<'a>> {
+pub fn parse(input: &'a str) -> Result<Option<Self>, Error> {
if input.trim().is_empty() {
return Ok(None);
}
@@ -527,7 +527,7 @@ pub fn parse_filter(input: Span) -> IResult<FilterCondition> {
terminated(|input| parse_expression(input, 0), eof)(input)
}

-impl std::fmt::Display for FilterCondition<'_> {
+impl<'a> std::fmt::Display for FilterCondition<'a> {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
FilterCondition::Not(filter) => {
@@ -576,8 +576,7 @@ impl std::fmt::Display for FilterCondition<'_> {
}
}
}
-impl std::fmt::Display for Condition<'_> {
+impl<'a> std::fmt::Display for Condition<'a> {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
Condition::GreaterThan(token) => write!(f, "> {token}"),
@@ -595,8 +594,7 @@ impl std::fmt::Display for Condition<'_> {
}
}
}
-impl std::fmt::Display for Token<'_> {
+impl<'a> std::fmt::Display for Token<'a> {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(f, "{{{}}}", self.value())
}

@@ -52,7 +52,7 @@ fn quoted_by(quote: char, input: Span) -> IResult<Token> {
}

// word = (alphanumeric | _ | - | .)+ except for reserved keywords
-pub fn word_not_keyword<'a>(input: Span<'a>) -> IResult<'a, Token<'a>> {
+pub fn word_not_keyword<'a>(input: Span<'a>) -> IResult<Token<'a>> {
let (input, word): (_, Token<'a>) =
take_while1(is_value_component)(input).map(|(s, t)| (s, t.into()))?;
if is_keyword(word.value()) {
@@ -13,7 +13,6 @@ license.workspace = true
[dependencies]
anyhow = "1.0.95"
bincode = "1.3.3"
-byte-unit = "5.1.6"
bumpalo = "3.16.0"
bumparaw-collections = "0.1.4"
convert_case = "0.6.0"
@@ -23,7 +22,6 @@ dump = { path = "../dump" }
enum-iterator = "2.1.0"
file-store = { path = "../file-store" }
flate2 = "1.0.35"
-indexmap = "2.7.0"
meilisearch-auth = { path = "../meilisearch-auth" }
meilisearch-types = { path = "../meilisearch-types" }
memmap2 = "0.9.5"
@@ -47,7 +45,7 @@ uuid = { version = "1.11.0", features = ["serde", "v4"] }

[dev-dependencies]
big_s = "1.0.2"
-crossbeam-channel = "0.5.15"
+crossbeam-channel = "0.5.14"
# fixed version due to format breakages in v1.40
insta = { version = "=1.39.0", features = ["json", "redactions"] }
maplit = "1.0.2"
@@ -2,7 +2,6 @@ use std::fmt::Display;

use meilisearch_types::batches::BatchId;
use meilisearch_types::error::{Code, ErrorCode};
-use meilisearch_types::milli::index::RollbackOutcome;
use meilisearch_types::tasks::{Kind, Status};
use meilisearch_types::{heed, milli};
use thiserror::Error;
@@ -151,24 +150,8 @@ pub enum Error {
CorruptedTaskQueue,
#[error(transparent)]
DatabaseUpgrade(Box<Self>),
-#[error("Failed to rollback for index `{index}`: {rollback_outcome} ")]
-RollbackFailed { index: String, rollback_outcome: RollbackOutcome },
#[error(transparent)]
UnrecoverableError(Box<Self>),
-#[error("The index scheduler is in version v{}.{}.{}, but Meilisearch is in version v{}.{}.{}.\n - hint: start the correct version of Meilisearch, or consider updating your database. See also <https://www.meilisearch.com/docs/learn/update_and_migration/updating>",
-index_scheduler_version.0, index_scheduler_version.1, index_scheduler_version.2,
-package_version.0, package_version.1, package_version.2)]
-IndexSchedulerVersionMismatch {
-index_scheduler_version: (u32, u32, u32),
-package_version: (u32, u32, u32),
-},
-#[error("Index `{index}` is in version v{}.{}.{}, but Meilisearch is in version v{}.{}.{}.\n - note: this is an internal error, please consider filing a bug report: <https://github.com/meilisearch/meilisearch/issues/new?template=bug_report.md>",
-index_version.0, index_version.1, index_version.2, package_version.0, package_version.1, package_version.2)]
-IndexVersionMismatch {
-index: String,
-index_version: (u32, u32, u32),
-package_version: (u32, u32, u32),
-},
#[error(transparent)]
HeedTransaction(heed::Error),

@@ -226,9 +209,6 @@ impl Error {
| Error::CorruptedTaskQueue
| Error::DatabaseUpgrade(_)
| Error::UnrecoverableError(_)
-| Error::IndexSchedulerVersionMismatch { .. }
-| Error::IndexVersionMismatch { .. }
-| Error::RollbackFailed { .. }
| Error::HeedTransaction(_) => false,
#[cfg(test)]
Error::PlannedFailure => false,
@@ -294,10 +274,7 @@ impl ErrorCode for Error {
Error::CorruptedTaskQueue => Code::Internal,
Error::CorruptedDump => Code::Internal,
Error::DatabaseUpgrade(_) => Code::Internal,
-Error::RollbackFailed { .. } => Code::Internal,
Error::UnrecoverableError(_) => Code::Internal,
-Error::IndexSchedulerVersionMismatch { .. } => Code::Internal,
-Error::IndexVersionMismatch { .. } => Code::Internal,
Error::CreateBatch(_) => Code::Internal,

// This one should never be seen by the end user
@@ -7,7 +7,6 @@ use meilisearch_types::heed::types::{SerdeJson, Str};
use meilisearch_types::heed::{Database, Env, RoTxn, RwTxn, WithoutTls};
use meilisearch_types::milli;
use meilisearch_types::milli::database_stats::DatabaseStats;
-use meilisearch_types::milli::index::RollbackOutcome;
use meilisearch_types::milli::update::IndexerConfig;
use meilisearch_types::milli::{FieldDistribution, Index};
use serde::{Deserialize, Serialize};
@@ -432,51 +431,6 @@ impl IndexMapper {
Ok(index)
}

-pub fn rollback_index(
-&self,
-rtxn: &RoTxn,
-name: &str,
-to: (u32, u32, u32),
-) -> Result<RollbackOutcome> {
-// remove any currently updating index to make sure that we aren't keeping a reference to the index somewhere
-drop(self.currently_updating_index.write().unwrap().take());
-
-let uuid = self
-.index_mapping
-.get(rtxn, name)?
-.ok_or_else(|| Error::IndexNotFound(name.to_string()))?;
-
-// take the lock to make sure noone is messing with the indexes while we rollback
-// this will block any search or other operation, but we are rollbacking so this is probably acceptable.
-let mut index_map = self.index_map.write().unwrap();
-
-'close_index: loop {
-match index_map.get(&uuid) {
-Available(_) => {
-index_map.close_for_resize(&uuid, self.enable_mdb_writemap, 0);
-// index should now be `Closing`; try again
-continue;
-}
-// index already closed
-Missing => break 'close_index,
-// closing requested by this thread or another one; wait for closing to complete, then exit
-Closing(closing_index) => {
-if closing_index.wait_timeout(Duration::from_secs(100)).is_none() {
-// release the lock so it doesn't get poisoned
-drop(index_map);
-panic!("cannot close index")
-}
-break;
-}
-BeingDeleted => return Err(Error::IndexNotFound(name.to_string())),
-};
-}
-
-let index_path = self.base_path.join(uuid.to_string());
-Index::rollback(milli::heed::EnvOpenOptions::new().read_txn_without_tls(), index_path, to)
-.map_err(|err| crate::Error::from_milli(err, Some(name.to_string())))
-}
-
/// Attempts `f` for each index that exists in the index mapper.
///
/// It is preferable to use this function rather than a loop that opens all indexes, as a way to avoid having all indexes opened,
@@ -41,8 +41,11 @@ pub fn snapshot_index_scheduler(scheduler: &IndexScheduler) -> String {
let mut snap = String::new();

let indx_sched_version = version.get_version(&rtxn).unwrap();
-let latest_version =
-(versioning::VERSION_MAJOR, versioning::VERSION_MINOR, versioning::VERSION_PATCH);
+let latest_version = (
+versioning::VERSION_MAJOR.parse().unwrap(),
+versioning::VERSION_MINOR.parse().unwrap(),
+versioning::VERSION_PATCH.parse().unwrap(),
+);
if indx_sched_version != Some(latest_version) {
snap.push_str(&format!("index scheduler running on version {indx_sched_version:?}\n"));
}
@@ -338,19 +341,9 @@ pub fn snapshot_canceled_by(rtxn: &RoTxn, db: Database<BEU32, RoaringBitmapCodec

pub fn snapshot_batch(batch: &Batch) -> String {
let mut snap = String::new();
-let Batch {
-uid,
-details,
-stats,
-started_at,
-finished_at,
-progress: _,
-enqueued_at,
-stop_reason,
-} = batch;
+let Batch { uid, details, stats, started_at, finished_at, progress: _, enqueued_at } = batch;
let stats = BatchStats {
progress_trace: Default::default(),
-internal_database_sizes: Default::default(),
write_channel_congestion: None,
..stats.clone()
};
@@ -365,7 +358,6 @@ pub fn snapshot_batch(batch: &Batch) -> String {
snap.push_str(&format!("uid: {uid}, "));
snap.push_str(&format!("details: {}, ", serde_json::to_string(details).unwrap()));
snap.push_str(&format!("stats: {}, ", serde_json::to_string(&stats).unwrap()));
-snap.push_str(&format!("stop reason: {}, ", serde_json::to_string(&stop_reason).unwrap()));
snap.push('}');
snap
}
@@ -28,7 +28,6 @@ mod lru;
mod processing;
mod queue;
mod scheduler;
-mod settings;
#[cfg(test)]
mod test_utils;
pub mod upgrade;
@@ -54,8 +53,8 @@ use flate2::Compression;
use meilisearch_types::batches::Batch;
use meilisearch_types::features::{InstanceTogglableFeatures, Network, RuntimeTogglableFeatures};
use meilisearch_types::heed::byteorder::BE;
-use meilisearch_types::heed::types::{SerdeJson, Str, I128};
+use meilisearch_types::heed::types::I128;
-use meilisearch_types::heed::{self, Database, Env, RoTxn, Unspecified, WithoutTls};
+use meilisearch_types::heed::{self, Env, RoTxn, WithoutTls};
use meilisearch_types::milli::index::IndexEmbeddingConfig;
use meilisearch_types::milli::update::IndexerConfig;
use meilisearch_types::milli::vector::{Embedder, EmbedderOptions, EmbeddingConfigs};
@@ -75,8 +74,6 @@ use crate::utils::clamp_to_page_size;

pub(crate) type BEI128 = I128<BE>;

-const TASK_SCHEDULER_SIZE_THRESHOLD_PERCENT_INT: u64 = 40;
-
#[derive(Debug)]
pub struct IndexSchedulerOptions {
/// The path to the version file of Meilisearch.
@@ -143,8 +140,6 @@ pub struct IndexScheduler {
/// The list of tasks currently processing
pub(crate) processing_tasks: Arc<RwLock<ProcessingTasks>>,

-/// The main database that also has the chat settings.
-pub main: Database<Str, Unspecified>,
/// A database containing only the version of the index-scheduler
pub version: versioning::Versioning,
/// The queue containing both the tasks and the batches.
@@ -199,7 +194,7 @@ impl IndexScheduler {
version: self.version.clone(),
queue: self.queue.private_clone(),
scheduler: self.scheduler.private_clone(),
-main: self.main.clone(),
index_mapper: self.index_mapper.clone(),
cleanup_enabled: self.cleanup_enabled,
webhook_url: self.webhook_url.clone(),
@@ -270,7 +265,6 @@ impl IndexScheduler {
let features = features::FeatureData::new(&env, &mut wtxn, options.instance_features)?;
let queue = Queue::new(&env, &mut wtxn, &options)?;
let index_mapper = IndexMapper::new(&env, &mut wtxn, &options, budget)?;
-let chat_settings = env.create_database(&mut wtxn, Some("chat-settings"))?;
wtxn.commit()?;

// allow unreachable_code to get rids of the warning in the case of a test build.
@@ -294,7 +288,6 @@ impl IndexScheduler {
#[cfg(test)]
run_loop_iteration: Arc::new(RwLock::new(0)),
features,
-chat_settings,
};

this.run();
@@ -403,9 +396,9 @@ impl IndexScheduler {
Ok(Ok(TickOutcome::StopProcessingForever)) => break,
Ok(Err(e)) => {
tracing::error!("{e}");
-// Wait when an irrecoverable error occurs.
+// Wait one second when an irrecoverable error occurs.
if !e.is_recoverable() {
-std::thread::sleep(Duration::from_secs(10));
+std::thread::sleep(Duration::from_secs(1));
}
}
Err(_panic) => {
@@ -432,17 +425,6 @@ impl IndexScheduler {
Ok(self.env.non_free_pages_size()?)
}

-/// Return the maximum possible database size
-pub fn max_size(&self) -> Result<u64> {
-Ok(self.env.info().map_size as u64)
-}
-
-/// Return the max size of task allowed until the task queue stop receiving.
-pub fn remaining_size_until_task_queue_stop(&self) -> Result<u64> {
-Ok((self.env.info().map_size as u64 * TASK_SCHEDULER_SIZE_THRESHOLD_PERCENT_INT / 100)
-.saturating_sub(self.used_size()?))
-}
-
/// Return the index corresponding to the name.
///
/// * If the index wasn't opened before, the index will be opened.
@@ -643,10 +625,9 @@ impl IndexScheduler {
task_id: Option<TaskId>,
dry_run: bool,
) -> Result<Task> {
-// if the task doesn't delete or cancel anything and 40% of the task queue is full, we must refuse to enqueue the incoming task
-if !matches!(&kind, KindWithContent::TaskDeletion { tasks, .. } | KindWithContent::TaskCancelation { tasks, .. } if !tasks.is_empty())
-&& (self.env.non_free_pages_size()? * 100) / self.env.info().map_size as u64
-> TASK_SCHEDULER_SIZE_THRESHOLD_PERCENT_INT
+// if the task doesn't delete anything and 50% of the task queue is full, we must refuse to enqueue the incomming task
+if !matches!(&kind, KindWithContent::TaskDeletion { tasks, .. } if !tasks.is_empty())
+&& (self.env.non_free_pages_size()? * 100) / self.env.info().map_size as u64 > 40
{
return Err(Error::NoSpaceLeftInTaskQueue);
}
@@ -715,7 +696,7 @@ impl IndexScheduler {
written: usize,
}

-impl Read for TaskReader<'_, '_> {
+impl<'a, 'b> Read for TaskReader<'a, 'b> {
fn read(&mut self, mut buf: &mut [u8]) -> std::io::Result<usize> {
if self.buffer.is_empty() {
match self.tasks.next() {
@@ -862,18 +843,6 @@ impl IndexScheduler {
.collect();
res.map(EmbeddingConfigs::new)
}

-pub fn chat_settings(&self) -> Result<Option<serde_json::Value>> {
-let rtxn = self.env.read_txn().map_err(Error::HeedTransaction)?;
-self.chat_settings.get(&rtxn, "main").map_err(Into::into)
-}
-
-pub fn put_chat_settings(&self, settings: &serde_json::Value) -> Result<()> {
-let mut wtxn = self.env.write_txn().map_err(Error::HeedTransaction)?;
-self.chat_settings.put(&mut wtxn, "main", settings)?;
-wtxn.commit().map_err(Error::HeedTransaction)?;
-Ok(())
-}
}

/// The outcome of calling the [`IndexScheduler::tick`] function.
@@ -64,17 +64,9 @@ make_enum_progress! {
}
}

-make_enum_progress! {
-pub enum FinalizingIndexStep {
-Committing,
-ComputingStats,
-}
-}
-
make_enum_progress! {
pub enum TaskCancelationProgress {
RetrievingTasks,
-CancelingUpgrade,
UpdatingTasks,
}
}
@@ -182,7 +182,6 @@ impl BatchQueue {
started_at: batch.started_at,
finished_at: batch.finished_at,
enqueued_at: batch.enqueued_at,
-stop_reason: batch.reason.to_string(),
},
)?;

@@ -106,7 +106,7 @@ fn query_batches_simple() {
batches[0].enqueued_at = None;
// Insta cannot snapshot our batches because the batch stats contains an enum as key: https://github.com/mitsuhiko/insta/issues/689
let batch = serde_json::to_string_pretty(&batches[0]).unwrap();
-snapshot!(batch, @r###"
+snapshot!(batch, @r#"
{
"uid": 0,
"details": {
@@ -126,10 +126,9 @@ fn query_batches_simple() {
},
"startedAt": "1970-01-01T00:00:00Z",
"finishedAt": null,
-"enqueuedAt": null,
-"stopReason": "task with id 0 of type `indexCreation` cannot be batched"
+"enqueuedAt": null
}
-"###);
+"#);

let query = Query { statuses: Some(vec![Status::Enqueued]), ..Default::default() };
let (batches, _) = index_scheduler
@@ -292,6 +292,8 @@ impl Queue {
return Ok(task);
}

+// Get rid of the mutability.
+let task = task;
self.tasks.register(wtxn, &task)?;

Ok(task)
@@ -1,5 +1,6 @@
---
source: crates/index-scheduler/src/queue/batches_test.rs
+snapshot_kind: text
---
### Autobatching Enabled = true
### Processing batch None:
@@ -48,8 +49,8 @@ catto: { number_of_documents: 0, field_distribution: {} }
[timestamp] [1,2,3,]
----------------------------------------------------------------------
### All Batches:
-0 {uid: 0, details: {"primaryKey":"mouse"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"indexCreation":1},"indexUids":{"catto":1}}, stop reason: "task with id 0 of type `indexCreation` cannot be batched", }
+0 {uid: 0, details: {"primaryKey":"mouse"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"indexCreation":1},"indexUids":{"catto":1}}, }
-1 {uid: 1, details: {"primaryKey":"sheep","matchedTasks":3,"canceledTasks":2,"originalFilter":"test_query","swaps":[{"indexes":["catto","doggo"]}]}, stats: {"totalNbTasks":3,"status":{"succeeded":1,"canceled":2},"types":{"indexCreation":1,"indexSwap":1,"taskCancelation":1},"indexUids":{"doggo":1}}, stop reason: "task with id 3 of type `taskCancelation` cannot be batched", }
+1 {uid: 1, details: {"primaryKey":"sheep","matchedTasks":3,"canceledTasks":2,"originalFilter":"test_query","swaps":[{"indexes":["catto","doggo"]}]}, stats: {"totalNbTasks":3,"status":{"succeeded":1,"canceled":2},"types":{"indexCreation":1,"indexSwap":1,"taskCancelation":1},"indexUids":{"doggo":1}}, }
----------------------------------------------------------------------
### Batch to tasks mapping:
0 [0,]

@@ -1,5 +1,6 @@
---
source: crates/index-scheduler/src/queue/batches_test.rs
+snapshot_kind: text
---
### Autobatching Enabled = true
### Processing batch None:
@@ -47,9 +48,9 @@ whalo: { number_of_documents: 0, field_distribution: {} }
[timestamp] [2,]
----------------------------------------------------------------------
### All Batches:
-0 {uid: 0, details: {"primaryKey":"bone"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"indexCreation":1},"indexUids":{"doggo":1}}, stop reason: "task with id 0 of type `indexCreation` cannot be batched", }
+0 {uid: 0, details: {"primaryKey":"bone"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"indexCreation":1},"indexUids":{"doggo":1}}, }
-1 {uid: 1, details: {"primaryKey":"plankton"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"indexCreation":1},"indexUids":{"whalo":1}}, stop reason: "task with id 1 of type `indexCreation` cannot be batched", }
+1 {uid: 1, details: {"primaryKey":"plankton"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"indexCreation":1},"indexUids":{"whalo":1}}, }
-2 {uid: 2, details: {"primaryKey":"his_own_vomit"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"indexCreation":1},"indexUids":{"catto":1}}, stop reason: "task with id 2 of type `indexCreation` cannot be batched", }
+2 {uid: 2, details: {"primaryKey":"his_own_vomit"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"indexCreation":1},"indexUids":{"catto":1}}, }
----------------------------------------------------------------------
### Batch to tasks mapping:
0 [0,]

@@ -1,10 +1,11 @@
---
source: crates/index-scheduler/src/queue/batches_test.rs
+snapshot_kind: text
---
### Autobatching Enabled = true
### Processing batch Some(1):
[1,]
-{uid: 1, details: {"primaryKey":"sheep"}, stats: {"totalNbTasks":1,"status":{"processing":1},"types":{"indexCreation":1},"indexUids":{"doggo":1}}, stop reason: "task with id 1 of type `indexCreation` cannot be batched", }
+{uid: 1, details: {"primaryKey":"sheep"}, stats: {"totalNbTasks":1,"status":{"processing":1},"types":{"indexCreation":1},"indexUids":{"doggo":1}}, }
----------------------------------------------------------------------
### All Tasks:
0 {uid: 0, batch_uid: 0, status: succeeded, details: { primary_key: Some("mouse") }, kind: IndexCreation { index_uid: "catto", primary_key: Some("mouse") }}
@@ -42,7 +43,7 @@ catto: { number_of_documents: 0, field_distribution: {} }
[timestamp] [0,]
----------------------------------------------------------------------
### All Batches:
-0 {uid: 0, details: {"primaryKey":"mouse"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"indexCreation":1},"indexUids":{"catto":1}}, stop reason: "task with id 0 of type `indexCreation` cannot be batched", }
+0 {uid: 0, details: {"primaryKey":"mouse"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"indexCreation":1},"indexUids":{"catto":1}}, }
----------------------------------------------------------------------
### Batch to tasks mapping:
0 [0,]

@@ -1,5 +1,6 @@
---
source: crates/index-scheduler/src/queue/batches_test.rs
+snapshot_kind: text
---
### Autobatching Enabled = true
### Processing batch None:
@@ -47,9 +48,9 @@ doggo: { number_of_documents: 0, field_distribution: {} }
[timestamp] [2,]
----------------------------------------------------------------------
### All Batches:
-0 {uid: 0, details: {"primaryKey":"mouse"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"indexCreation":1},"indexUids":{"catto":1}}, stop reason: "task with id 0 of type `indexCreation` cannot be batched", }
+0 {uid: 0, details: {"primaryKey":"mouse"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"indexCreation":1},"indexUids":{"catto":1}}, }
-1 {uid: 1, details: {"primaryKey":"sheep"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"indexCreation":1},"indexUids":{"doggo":1}}, stop reason: "task with id 1 of type `indexCreation` cannot be batched", }
+1 {uid: 1, details: {"primaryKey":"sheep"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"indexCreation":1},"indexUids":{"doggo":1}}, }
-2 {uid: 2, details: {"primaryKey":"fish"}, stats: {"totalNbTasks":1,"status":{"failed":1},"types":{"indexCreation":1},"indexUids":{"whalo":1}}, stop reason: "task with id 2 of type `indexCreation` cannot be batched", }
+2 {uid: 2, details: {"primaryKey":"fish"}, stats: {"totalNbTasks":1,"status":{"failed":1},"types":{"indexCreation":1},"indexUids":{"whalo":1}}, }
----------------------------------------------------------------------
### Batch to tasks mapping:
0 [0,]

@@ -1,5 +1,6 @@
---
source: crates/index-scheduler/src/queue/batches_test.rs
+snapshot_kind: text
---
### Autobatching Enabled = true
### Processing batch None:
@@ -52,10 +53,10 @@ doggo: { number_of_documents: 0, field_distribution: {} }
[timestamp] [3,]
----------------------------------------------------------------------
### All Batches:
-0 {uid: 0, details: {"primaryKey":"mouse"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"indexCreation":1},"indexUids":{"catto":1}}, stop reason: "task with id 0 of type `indexCreation` cannot be batched", }
+0 {uid: 0, details: {"primaryKey":"mouse"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"indexCreation":1},"indexUids":{"catto":1}}, }
-1 {uid: 1, details: {"primaryKey":"sheep"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"indexCreation":1},"indexUids":{"doggo":1}}, stop reason: "task with id 1 of type `indexCreation` cannot be batched", }
+1 {uid: 1, details: {"primaryKey":"sheep"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"indexCreation":1},"indexUids":{"doggo":1}}, }
-2 {uid: 2, details: {"swaps":[{"indexes":["catto","doggo"]}]}, stats: {"totalNbTasks":1,"status":{"failed":1},"types":{"indexSwap":1},"indexUids":{}}, stop reason: "task with id 2 of type `indexSwap` cannot be batched", }
+2 {uid: 2, details: {"swaps":[{"indexes":["catto","doggo"]}]}, stats: {"totalNbTasks":1,"status":{"failed":1},"types":{"indexSwap":1},"indexUids":{}}, }
-3 {uid: 3, details: {"swaps":[{"indexes":["catto","whalo"]}]}, stats: {"totalNbTasks":1,"status":{"failed":1},"types":{"indexSwap":1},"indexUids":{}}, stop reason: "task with id 3 of type `indexSwap` cannot be batched", }
+3 {uid: 3, details: {"swaps":[{"indexes":["catto","whalo"]}]}, stats: {"totalNbTasks":1,"status":{"failed":1},"types":{"indexSwap":1},"indexUids":{}}, }
----------------------------------------------------------------------
### Batch to tasks mapping:
0 [0,]

@@ -1,5 +1,6 @@
---
source: crates/index-scheduler/src/queue/tasks_test.rs
+snapshot_kind: text
---
### Autobatching Enabled = true
### Processing batch None:
@@ -48,8 +49,8 @@ catto: { number_of_documents: 0, field_distribution: {} }
[timestamp] [1,2,3,]
----------------------------------------------------------------------
### All Batches:
-0 {uid: 0, details: {"primaryKey":"mouse"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"indexCreation":1},"indexUids":{"catto":1}}, stop reason: "task with id 0 of type `indexCreation` cannot be batched", }
+0 {uid: 0, details: {"primaryKey":"mouse"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"indexCreation":1},"indexUids":{"catto":1}}, }
-1 {uid: 1, details: {"primaryKey":"sheep","matchedTasks":3,"canceledTasks":2,"originalFilter":"test_query","swaps":[{"indexes":["catto","doggo"]}]}, stats: {"totalNbTasks":3,"status":{"succeeded":1,"canceled":2},"types":{"indexCreation":1,"indexSwap":1,"taskCancelation":1},"indexUids":{"doggo":1}}, stop reason: "task with id 3 of type `taskCancelation` cannot be batched", }
+1 {uid: 1, details: {"primaryKey":"sheep","matchedTasks":3,"canceledTasks":2,"originalFilter":"test_query","swaps":[{"indexes":["catto","doggo"]}]}, stats: {"totalNbTasks":3,"status":{"succeeded":1,"canceled":2},"types":{"indexCreation":1,"indexSwap":1,"taskCancelation":1},"indexUids":{"doggo":1}}, }
----------------------------------------------------------------------
### Batch to tasks mapping:
0 [0,]

@@ -1,5 +1,6 @@
---
source: crates/index-scheduler/src/queue/tasks_test.rs
+snapshot_kind: text
---
### Autobatching Enabled = true
### Processing batch None:
@@ -47,9 +48,9 @@ whalo: { number_of_documents: 0, field_distribution: {} }
[timestamp] [2,]
----------------------------------------------------------------------
### All Batches:
-0 {uid: 0, details: {"primaryKey":"bone"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"indexCreation":1},"indexUids":{"doggo":1}}, stop reason: "task with id 0 of type `indexCreation` cannot be batched", }
+0 {uid: 0, details: {"primaryKey":"bone"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"indexCreation":1},"indexUids":{"doggo":1}}, }
-1 {uid: 1, details: {"primaryKey":"plankton"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"indexCreation":1},"indexUids":{"whalo":1}}, stop reason: "task with id 1 of type `indexCreation` cannot be batched", }
+1 {uid: 1, details: {"primaryKey":"plankton"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"indexCreation":1},"indexUids":{"whalo":1}}, }
-2 {uid: 2, details: {"primaryKey":"his_own_vomit"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"indexCreation":1},"indexUids":{"catto":1}}, stop reason: "task with id 2 of type `indexCreation` cannot be batched", }
+2 {uid: 2, details: {"primaryKey":"his_own_vomit"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"indexCreation":1},"indexUids":{"catto":1}}, }
----------------------------------------------------------------------
### Batch to tasks mapping:
0 [0,]

@@ -1,5 +1,6 @@
---
source: crates/index-scheduler/src/queue/tasks_test.rs
+snapshot_kind: text
---
### Autobatching Enabled = true
### Processing batch None:
@@ -47,9 +48,9 @@ doggo: { number_of_documents: 0, field_distribution: {} }
[timestamp] [2,]
----------------------------------------------------------------------
### All Batches:
-0 {uid: 0, details: {"primaryKey":"mouse"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"indexCreation":1},"indexUids":{"catto":1}}, stop reason: "task with id 0 of type `indexCreation` cannot be batched", }
+0 {uid: 0, details: {"primaryKey":"mouse"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"indexCreation":1},"indexUids":{"catto":1}}, }
-1 {uid: 1, details: {"primaryKey":"sheep"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"indexCreation":1},"indexUids":{"doggo":1}}, stop reason: "task with id 1 of type `indexCreation` cannot be batched", }
+1 {uid: 1, details: {"primaryKey":"sheep"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"indexCreation":1},"indexUids":{"doggo":1}}, }
-2 {uid: 2, details: {"primaryKey":"fish"}, stats: {"totalNbTasks":1,"status":{"failed":1},"types":{"indexCreation":1},"indexUids":{"whalo":1}}, stop reason: "task with id 2 of type `indexCreation` cannot be batched", }
+2 {uid: 2, details: {"primaryKey":"fish"}, stats: {"totalNbTasks":1,"status":{"failed":1},"types":{"indexCreation":1},"indexUids":{"whalo":1}}, }
----------------------------------------------------------------------
### Batch to tasks mapping:
0 [0,]
|||||||
@@ -315,7 +315,7 @@ impl Queue {
|
|||||||
if let Some(batch_uids) = batch_uids {
|
if let Some(batch_uids) = batch_uids {
|
||||||
let mut batch_tasks = RoaringBitmap::new();
|
let mut batch_tasks = RoaringBitmap::new();
|
||||||
for batch_uid in batch_uids {
|
for batch_uid in batch_uids {
|
||||||
if processing_batch.as_ref().is_some_and(|batch| batch.uid == *batch_uid) {
|
if processing_batch.as_ref().map_or(false, |batch| batch.uid == *batch_uid) {
|
||||||
batch_tasks |= &**processing_tasks;
|
batch_tasks |= &**processing_tasks;
|
||||||
} else {
|
} else {
|
||||||
batch_tasks |= self.tasks_in_batch(rtxn, *batch_uid)?;
|
batch_tasks |= self.tasks_in_batch(rtxn, *batch_uid)?;
|
||||||
|
|||||||
@@ -364,7 +364,7 @@ fn test_task_queue_is_full() {
|
|||||||
// we won't be able to test this error in an integration test thus as a best effort test I still ensure the error return the expected error code
|
// we won't be able to test this error in an integration test thus as a best effort test I still ensure the error return the expected error code
|
||||||
snapshot!(format!("{:?}", result.error_code()), @"NoSpaceLeftOnDevice");
|
snapshot!(format!("{:?}", result.error_code()), @"NoSpaceLeftOnDevice");
|
||||||
|
|
||||||
// Even the task deletion and cancelation that don't delete anything should be refused
|
// Even the task deletion that doesn't delete anything shouldn't be accepted
|
||||||
let result = index_scheduler
|
let result = index_scheduler
|
||||||
.register(
|
.register(
|
||||||
KindWithContent::TaskDeletion { query: S("test"), tasks: RoaringBitmap::new() },
|
KindWithContent::TaskDeletion { query: S("test"), tasks: RoaringBitmap::new() },
|
||||||
@@ -373,39 +373,10 @@ fn test_task_queue_is_full() {
|
|||||||
)
|
)
|
||||||
.unwrap_err();
|
.unwrap_err();
|
||||||
snapshot!(result, @"Meilisearch cannot receive write operations because the limit of the task database has been reached. Please delete tasks to continue performing write operations.");
|
snapshot!(result, @"Meilisearch cannot receive write operations because the limit of the task database has been reached. Please delete tasks to continue performing write operations.");
|
||||||
let result = index_scheduler
|
|
||||||
.register(
|
|
||||||
KindWithContent::TaskCancelation { query: S("test"), tasks: RoaringBitmap::new() },
|
|
||||||
None,
|
|
||||||
false,
|
|
||||||
)
|
|
||||||
.unwrap_err();
|
|
||||||
snapshot!(result, @"Meilisearch cannot receive write operations because the limit of the task database has been reached. Please delete tasks to continue performing write operations.");
|
|
||||||
|
|
||||||
// we won't be able to test this error in an integration test thus as a best effort test I still ensure the error return the expected error code
|
// we won't be able to test this error in an integration test thus as a best effort test I still ensure the error return the expected error code
|
||||||
snapshot!(format!("{:?}", result.error_code()), @"NoSpaceLeftOnDevice");
|
snapshot!(format!("{:?}", result.error_code()), @"NoSpaceLeftOnDevice");
|
||||||
|
|
||||||
// But a task cancelation that cancel something should work
|
// But a task deletion that delete something should works
|
||||||
index_scheduler
|
|
||||||
.register(
|
|
||||||
KindWithContent::TaskCancelation { query: S("test"), tasks: (0..100).collect() },
|
|
||||||
None,
|
|
||||||
false,
|
|
||||||
)
|
|
||||||
.unwrap();
|
|
||||||
handle.advance_one_successful_batch();
|
|
||||||
|
|
||||||
// But we should still be forbidden from enqueuing new tasks
|
|
||||||
let result = index_scheduler
|
|
||||||
.register(
|
|
||||||
KindWithContent::IndexCreation { index_uid: S("doggo"), primary_key: None },
|
|
||||||
None,
|
|
||||||
false,
|
|
||||||
)
|
|
||||||
.unwrap_err();
|
|
||||||
snapshot!(result, @"Meilisearch cannot receive write operations because the limit of the task database has been reached. Please delete tasks to continue performing write operations.");
|
|
||||||
|
|
||||||
// And a task deletion that delete something should works
|
|
||||||
index_scheduler
|
index_scheduler
|
||||||
.register(
|
.register(
|
||||||
KindWithContent::TaskDeletion { query: S("test"), tasks: (0..100).collect() },
|
KindWithContent::TaskDeletion { query: S("test"), tasks: (0..100).collect() },
|
||||||
|
|||||||
@@ -5,10 +5,9 @@ tasks affecting a single index into a [batch](crate::batch::Batch).
|
|||||||
The main function of the autobatcher is [`next_autobatch`].
|
The main function of the autobatcher is [`next_autobatch`].
|
||||||
*/
|
*/
|
||||||
|
|
||||||
|
use meilisearch_types::tasks::TaskId;
|
||||||
use std::ops::ControlFlow::{self, Break, Continue};
|
use std::ops::ControlFlow::{self, Break, Continue};
|
||||||
|
|
||||||
use meilisearch_types::tasks::{BatchStopReason, PrimaryKeyMismatchReason, TaskId};
|
|
||||||
|
|
||||||
use crate::KindWithContent;
|
use crate::KindWithContent;
|
||||||
|
|
||||||
/// Succinctly describes a task's [`Kind`](meilisearch_types::tasks::Kind)
|
/// Succinctly describes a task's [`Kind`](meilisearch_types::tasks::Kind)
|
||||||
@@ -146,42 +145,16 @@ impl BatchKind {
|
|||||||
// TODO use an AutoBatchKind as input
|
// TODO use an AutoBatchKind as input
|
||||||
pub fn new(
|
pub fn new(
|
||||||
task_id: TaskId,
|
task_id: TaskId,
|
||||||
kind_with_content: KindWithContent,
|
kind: KindWithContent,
|
||||||
primary_key: Option<&str>,
|
primary_key: Option<&str>,
|
||||||
) -> (ControlFlow<(BatchKind, BatchStopReason), BatchKind>, bool) {
|
) -> (ControlFlow<BatchKind, BatchKind>, bool) {
|
||||||
use AutobatchKind as K;
|
use AutobatchKind as K;
|
||||||
|
|
||||||
let kind = kind_with_content.as_kind();
|
match AutobatchKind::from(kind) {
|
||||||
|
K::IndexCreation => (Break(BatchKind::IndexCreation { id: task_id }), true),
|
||||||
match AutobatchKind::from(kind_with_content) {
|
K::IndexDeletion => (Break(BatchKind::IndexDeletion { ids: vec![task_id] }), false),
|
||||||
K::IndexCreation => (
|
K::IndexUpdate => (Break(BatchKind::IndexUpdate { id: task_id }), false),
|
||||||
Break((
|
K::IndexSwap => (Break(BatchKind::IndexSwap { id: task_id }), false),
|
||||||
BatchKind::IndexCreation { id: task_id },
|
|
||||||
BatchStopReason::TaskCannotBeBatched { kind, id: task_id },
|
|
||||||
)),
|
|
||||||
true,
|
|
||||||
),
|
|
||||||
K::IndexDeletion => (
|
|
||||||
Break((
|
|
||||||
BatchKind::IndexDeletion { ids: vec![task_id] },
|
|
||||||
BatchStopReason::IndexDeletion { id: task_id },
|
|
||||||
)),
|
|
||||||
false,
|
|
||||||
),
|
|
||||||
K::IndexUpdate => (
|
|
||||||
Break((
|
|
||||||
BatchKind::IndexUpdate { id: task_id },
|
|
||||||
BatchStopReason::TaskCannotBeBatched { kind, id: task_id },
|
|
||||||
)),
|
|
||||||
false,
|
|
||||||
),
|
|
||||||
K::IndexSwap => (
|
|
||||||
Break((
|
|
||||||
BatchKind::IndexSwap { id: task_id },
|
|
||||||
BatchStopReason::TaskCannotBeBatched { kind, id: task_id },
|
|
||||||
)),
|
|
||||||
false,
|
|
||||||
),
|
|
||||||
K::DocumentClear => (Continue(BatchKind::DocumentClear { ids: vec![task_id] }), false),
|
K::DocumentClear => (Continue(BatchKind::DocumentClear { ids: vec![task_id] }), false),
|
||||||
K::DocumentImport { allow_index_creation, primary_key: pk }
|
K::DocumentImport { allow_index_creation, primary_key: pk }
|
||||||
if primary_key.is_none() || pk.is_none() || primary_key == pk.as_deref() =>
|
if primary_key.is_none() || pk.is_none() || primary_key == pk.as_deref() =>
|
||||||
@@ -196,28 +169,15 @@ impl BatchKind {
|
|||||||
)
|
)
|
||||||
}
|
}
|
||||||
// if the primary key set in the task was different than ours we should stop and make this batch fail asap.
|
// if the primary key set in the task was different than ours we should stop and make this batch fail asap.
|
||||||
K::DocumentImport { allow_index_creation, primary_key: pk } => (
|
K::DocumentImport { allow_index_creation, primary_key } => (
|
||||||
Break((
|
Break(BatchKind::DocumentOperation {
|
||||||
BatchKind::DocumentOperation {
|
allow_index_creation,
|
||||||
allow_index_creation,
|
primary_key,
|
||||||
primary_key: pk.clone(),
|
operation_ids: vec![task_id],
|
||||||
operation_ids: vec![task_id],
|
}),
|
||||||
},
|
|
||||||
BatchStopReason::PrimaryKeyIndexMismatch {
|
|
||||||
id: task_id,
|
|
||||||
in_index: primary_key.unwrap().to_owned(),
|
|
||||||
in_task: pk.unwrap(),
|
|
||||||
},
|
|
||||||
)),
|
|
||||||
allow_index_creation,
|
allow_index_creation,
|
||||||
),
|
),
|
||||||
K::DocumentEdition => (
|
K::DocumentEdition => (Break(BatchKind::DocumentEdition { id: task_id }), false),
|
||||||
Break((
|
|
||||||
BatchKind::DocumentEdition { id: task_id },
|
|
||||||
BatchStopReason::TaskCannotBeBatched { kind, id: task_id },
|
|
||||||
)),
|
|
||||||
false,
|
|
||||||
),
|
|
||||||
K::DocumentDeletion { by_filter: includes_by_filter } => (
|
K::DocumentDeletion { by_filter: includes_by_filter } => (
|
||||||
Continue(BatchKind::DocumentDeletion {
|
Continue(BatchKind::DocumentDeletion {
|
||||||
deletion_ids: vec![task_id],
|
deletion_ids: vec![task_id],
|
||||||
@@ -237,60 +197,43 @@ impl BatchKind {
 /// To ease the writing of the code. `true` can be returned when you don't need to create an index
 /// but false can't be returned if you needs to create an index.
 #[rustfmt::skip]
-fn accumulate(self, id: TaskId, kind_with_content: KindWithContent, index_already_exists: bool, primary_key: Option<&str>) -> ControlFlow<(BatchKind, BatchStopReason), BatchKind> {
+fn accumulate(self, id: TaskId, kind: AutobatchKind, index_already_exists: bool, primary_key: Option<&str>) -> ControlFlow<BatchKind, BatchKind> {
 use AutobatchKind as K;

-let kind = kind_with_content.as_kind();
-let autobatch_kind = AutobatchKind::from(kind_with_content);
-
-let pk: Option<String> = match (self.primary_key(), autobatch_kind.primary_key(), primary_key) {
-// 1. If incoming task don't interact with primary key -> we can continue
-(batch_pk, None | Some(None), _) => {
-batch_pk.flatten().map(ToOwned::to_owned)
-},
-// 2.1 If we already have a primary-key ->
-// 2.1.1 If the task we're trying to accumulate have a pk it must be equal to our primary key
-(_batch_pk, Some(Some(task_pk)), Some(index_pk)) => if task_pk == index_pk {
-Some(task_pk.to_owned())
-} else {
-return Break((self, BatchStopReason::PrimaryKeyMismatch {
-id,
-reason: PrimaryKeyMismatchReason::TaskPrimaryKeyDifferFromIndexPrimaryKey {
-task_pk: task_pk.to_owned(),
-index_pk: index_pk.to_owned(),
-},
-}))
-},
-// 2.2 If we don't have a primary-key ->
-// 2.2.2 If the batch is set to Some(None), the task should be too
-(Some(None), Some(Some(task_pk)), None) => return Break((self, BatchStopReason::PrimaryKeyMismatch {
-id,
-reason: PrimaryKeyMismatchReason::CannotInterfereWithPrimaryKeyGuessing {
-task_pk: task_pk.to_owned(),
-},
-})),
-(Some(Some(batch_pk)), Some(Some(task_pk)), None) => if task_pk == batch_pk {
-Some(task_pk.to_owned())
-} else {
-let batch_pk = batch_pk.to_owned();
-let task_pk = task_pk.to_owned();
-return Break((self, BatchStopReason::PrimaryKeyMismatch {
-id,
-reason: PrimaryKeyMismatchReason::TaskPrimaryKeyDifferFromCurrentBatchPrimaryKey {
-batch_pk,
-task_pk
-},
-}))
-},
-(None, Some(Some(task_pk)), None) => Some(task_pk.to_owned())
-};
-
-match (self, autobatch_kind) {
+match (self, kind) {
 // We don't batch any of these operations
-(this, K::IndexCreation | K::IndexUpdate | K::IndexSwap | K::DocumentEdition) => Break((this, BatchStopReason::TaskCannotBeBatched { kind, id })),
+(this, K::IndexCreation | K::IndexUpdate | K::IndexSwap | K::DocumentEdition) => Break(this),
 // We must not batch tasks that don't have the same index creation rights if the index doesn't already exists.
 (this, kind) if !index_already_exists && this.allow_index_creation() == Some(false) && kind.allow_index_creation() == Some(true) => {
-Break((this, BatchStopReason::IndexCreationMismatch { id }))
+Break(this)
+},
+// NOTE: We need to negate the whole condition since we're checking if we need to break instead of continue.
+// I wrote it this way because it's easier to understand than the other way around.
+(this, kind) if !(
+// 1. If both task don't interact with primary key -> we can continue
+(this.primary_key().is_none() && kind.primary_key().is_none()) ||
+// 2. Else ->
+(
+// 2.1 If we already have a primary-key ->
+(
+primary_key.is_some() &&
+// 2.1.1 If the task we're trying to accumulate have a pk it must be equal to our primary key
+// 2.1.2 If the task don't have a primary-key -> we can continue
+kind.primary_key().map_or(true, |pk| pk == primary_key)
+) ||
+// 2.2 If we don't have a primary-key ->
+(
+// 2.2.1 If both the batch and the task have a primary key they should be equal
+// 2.2.2 If the batch is set to Some(None), the task should be too
+// 2.2.3 If the batch is set to None -> we can continue
+this.primary_key().zip(kind.primary_key()).map_or(true, |(this, kind)| this == kind)
+)
+)
+
+) // closing the negation
+
+=> {
+Break(this)
 },
 // The index deletion can batch with everything but must stop after
 (
@@ -301,7 +244,7 @@ impl BatchKind {
 K::IndexDeletion,
 ) => {
 ids.push(id);
-Break((BatchKind::IndexDeletion { ids }, BatchStopReason::IndexDeletion { id }))
+Break(BatchKind::IndexDeletion { ids })
 }
 (
 BatchKind::ClearAndSettings { settings_ids: mut ids, allow_index_creation: _, mut other },
@@ -309,7 +252,7 @@ impl BatchKind {
 ) => {
 ids.push(id);
 ids.append(&mut other);
-Break((BatchKind::IndexDeletion { ids }, BatchStopReason::IndexDeletion { id }))
+Break(BatchKind::IndexDeletion { ids })
 }

 (
@@ -322,7 +265,7 @@ impl BatchKind {
 (
 this @ BatchKind::DocumentClear { .. },
 K::DocumentImport { .. } | K::Settings { .. },
-) => Break((this, BatchStopReason::DocumentOperationWithSettings { id })),
+) => Break(this),
 (
 BatchKind::DocumentOperation { allow_index_creation: _, primary_key: _, mut operation_ids },
 K::DocumentClear,
@@ -334,7 +277,7 @@ impl BatchKind {
 // we can autobatch different kind of document operations and mix replacements with updates
 (
 BatchKind::DocumentOperation { allow_index_creation, primary_key: _, mut operation_ids },
-K::DocumentImport { primary_key: _, .. },
+K::DocumentImport { primary_key: pk, .. },
 ) => {
 operation_ids.push(id);
 Continue(BatchKind::DocumentOperation {
@@ -344,15 +287,15 @@ impl BatchKind {
 })
 }
 (
-BatchKind::DocumentOperation { allow_index_creation, primary_key: _, mut operation_ids },
+BatchKind::DocumentOperation { allow_index_creation, primary_key, mut operation_ids },
 K::DocumentDeletion { by_filter: false },
 ) => {
 operation_ids.push(id);

 Continue(BatchKind::DocumentOperation {
 allow_index_creation,
+primary_key,
 operation_ids,
-primary_key: pk,
 })
 }
 // We can't batch a document operation with a delete by filter
@@ -360,12 +303,12 @@ impl BatchKind {
 this @ BatchKind::DocumentOperation { .. },
 K::DocumentDeletion { by_filter: true },
 ) => {
-Break((this, BatchStopReason::DocumentOperationWithDeletionByFilter { id }))
+Break(this)
 }
 (
 this @ BatchKind::DocumentOperation { .. },
 K::Settings { .. },
-) => Break((this, BatchStopReason::DocumentOperationWithSettings { id })),
+) => Break(this),

 (BatchKind::DocumentDeletion { mut deletion_ids, includes_by_filter: _ }, K::DocumentClear) => {
 deletion_ids.push(id);
@@ -375,7 +318,7 @@ impl BatchKind {
 (
 this @ BatchKind::DocumentDeletion { deletion_ids: _, includes_by_filter: true },
 K::DocumentImport { .. }
-) => Break((this, BatchStopReason::DeletionByFilterWithDocumentOperation { id })),
+) => Break(this),
 // we can autobatch the deletion and import if the index already exists
 (
 BatchKind::DocumentDeletion { mut deletion_ids, includes_by_filter: false },
@@ -402,18 +345,18 @@ impl BatchKind {
 operation_ids: deletion_ids,
 })
 }
-// we can't autobatch a deletion and an import if the index does not exist but would be created by an addition
+// we can't autobatch a deletion and an import if the index does not exists but would be created by an addition
 (
 this @ BatchKind::DocumentDeletion { .. },
 K::DocumentImport { .. }
 ) => {
-Break((this, BatchStopReason::IndexCreationMismatch { id }))
+Break(this)
 }
 (BatchKind::DocumentDeletion { mut deletion_ids, includes_by_filter }, K::DocumentDeletion { by_filter }) => {
 deletion_ids.push(id);
 Continue(BatchKind::DocumentDeletion { deletion_ids, includes_by_filter: includes_by_filter | by_filter })
 }
-(this @ BatchKind::DocumentDeletion { .. }, K::Settings { .. }) => Break((this, BatchStopReason::DocumentOperationWithSettings { id })),
+(this @ BatchKind::DocumentDeletion { .. }, K::Settings { .. }) => Break(this),

 (
 BatchKind::Settings { settings_ids, allow_index_creation },
@@ -426,7 +369,7 @@ impl BatchKind {
 (
 this @ BatchKind::Settings { .. },
 K::DocumentImport { .. } | K::DocumentDeletion { .. },
-) => Break((this, BatchStopReason::SettingsWithDocumentOperation { id })),
+) => Break(this),
 (
 BatchKind::Settings { mut settings_ids, allow_index_creation },
 K::Settings { .. },
@@ -449,7 +392,7 @@ impl BatchKind {
 allow_index_creation,
 })
 }
-(this @ BatchKind::ClearAndSettings { .. }, K::DocumentImport { .. }) => Break((this, BatchStopReason::SettingsWithDocumentOperation { id })),
+(this @ BatchKind::ClearAndSettings { .. }, K::DocumentImport { .. }) => Break(this),
 (
 BatchKind::ClearAndSettings {
 mut other,
@@ -505,7 +448,7 @@ pub fn autobatch(
 enqueued: Vec<(TaskId, KindWithContent)>,
 index_already_exists: bool,
 primary_key: Option<&str>,
-) -> Option<(BatchKind, bool, Option<BatchStopReason>)> {
+) -> Option<(BatchKind, bool)> {
 let mut enqueued = enqueued.into_iter();
 let (id, kind) = enqueued.next()?;

@@ -514,22 +457,18 @@ pub fn autobatch(

 let (mut acc, must_create_index) = match BatchKind::new(id, kind, primary_key) {
 (Continue(acc), create) => (acc, create),
-(Break((acc, batch_stop_reason)), create) => {
-return Some((acc, create, Some(batch_stop_reason)))
-}
+(Break(acc), create) => return Some((acc, create)),
 };

 // if an index has been created in the previous step we can consider it as existing.
 index_exist |= must_create_index;

-for (id, kind_with_content) in enqueued {
-acc = match acc.accumulate(id, kind_with_content, index_exist, primary_key) {
+for (id, kind) in enqueued {
+acc = match acc.accumulate(id, kind.into(), index_exist, primary_key) {
 Continue(acc) => acc,
-Break((acc, batch_stop_reason)) => {
-return Some((acc, must_create_index, Some(batch_stop_reason)))
-}
+Break(acc) => return Some((acc, must_create_index)),
 };
 }

-Some((acc, must_create_index, None))
+Some((acc, must_create_index))
 }
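Both sides of the hunk above enforce the same primary-key compatibility rule when accumulating a task into a batch; the base branch spells it out as an up-front `match` that returns an explicit `PrimaryKeyMismatch` stop reason, while the compared branch folds it into one negated boolean guard. The following is only a standalone distillation of that rule for reading purposes; `pk_compatible` and its parameter names are not part of either branch, and its three inputs mirror the batch's primary key, the incoming task's primary key, and the index's primary key as used in `accumulate`.

fn pk_compatible(
    batch_pk: Option<Option<&str>>,
    task_pk: Option<Option<&str>>,
    index_pk: Option<&str>,
) -> bool {
    match (batch_pk, task_pk, index_pk) {
        // A task that does not set a primary key never conflicts.
        (_, None | Some(None), _) => true,
        // The index already has a primary key: the task must use the same one.
        (_, Some(Some(task)), Some(index)) => task == index,
        // No index primary key yet: the task must agree with what the batch already settled on.
        (Some(Some(batch)), Some(Some(task)), None) => task == batch,
        // The batch is already committed to guessing the primary key, so a task that sets one must break.
        (Some(None), Some(Some(_)), None) => false,
        // First task of the batch to propose a primary key.
        (None, Some(Some(_)), None) => true,
    }
}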
@@ -1,7 +1,7 @@
 use meilisearch_types::milli::update::IndexDocumentsMethod::{
 self, ReplaceDocuments, UpdateDocuments,
 };
-use meilisearch_types::tasks::{BatchStopReason, IndexSwap, KindWithContent};
+use meilisearch_types::tasks::{IndexSwap, KindWithContent};
 use uuid::Uuid;

 use self::autobatcher::{autobatch, BatchKind};
@@ -20,7 +20,7 @@ fn autobatch_from(
 index_already_exists: bool,
 primary_key: Option<&str>,
 input: impl IntoIterator<Item = KindWithContent>,
-) -> Option<(BatchKind, bool, Option<BatchStopReason>)> {
+) -> Option<(BatchKind, bool)> {
 autobatch(
 input.into_iter().enumerate().map(|(id, kind)| (id as TaskId, kind)).collect(),
 index_already_exists,
@@ -92,304 +92,304 @@ fn idx_swap() -> KindWithContent {
 fn autobatch_simple_operation_together() {
 // we can autobatch one or multiple `ReplaceDocuments` together.
 // if the index exists.
-debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, true, None)]), @"Some((DocumentOperation { allow_index_creation: true, primary_key: None, operation_ids: [0] }, true, None))");
+debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, true, None)]), @"Some((DocumentOperation { allow_index_creation: true, primary_key: None, operation_ids: [0] }, true))");
-debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, false, None)]), @"Some((DocumentOperation { allow_index_creation: false, primary_key: None, operation_ids: [0] }, false, None))");
+debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, false, None)]), @"Some((DocumentOperation { allow_index_creation: false, primary_key: None, operation_ids: [0] }, false))");
-debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, true, None), doc_imp( ReplaceDocuments, true , None), doc_imp(ReplaceDocuments, true , None)]), @"Some((DocumentOperation { allow_index_creation: true, primary_key: None, operation_ids: [0, 1, 2] }, true, None))");
+debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, true, None), doc_imp( ReplaceDocuments, true , None), doc_imp(ReplaceDocuments, true , None)]), @"Some((DocumentOperation { allow_index_creation: true, primary_key: None, operation_ids: [0, 1, 2] }, true))");
-debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, false, None), doc_imp( ReplaceDocuments, false , None), doc_imp(ReplaceDocuments, false , None)]), @"Some((DocumentOperation { allow_index_creation: false, primary_key: None, operation_ids: [0, 1, 2] }, false, None))");
+debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, false, None), doc_imp( ReplaceDocuments, false , None), doc_imp(ReplaceDocuments, false , None)]), @"Some((DocumentOperation { allow_index_creation: false, primary_key: None, operation_ids: [0, 1, 2] }, false))");

 // if it doesn't exists.
-debug_snapshot!(autobatch_from(false,None, [doc_imp(ReplaceDocuments, true, None)]), @"Some((DocumentOperation { allow_index_creation: true, primary_key: None, operation_ids: [0] }, true, None))");
+debug_snapshot!(autobatch_from(false,None, [doc_imp(ReplaceDocuments, true, None)]), @"Some((DocumentOperation { allow_index_creation: true, primary_key: None, operation_ids: [0] }, true))");
-debug_snapshot!(autobatch_from(false,None, [doc_imp(ReplaceDocuments, false, None)]), @"Some((DocumentOperation { allow_index_creation: false, primary_key: None, operation_ids: [0] }, false, None))");
+debug_snapshot!(autobatch_from(false,None, [doc_imp(ReplaceDocuments, false, None)]), @"Some((DocumentOperation { allow_index_creation: false, primary_key: None, operation_ids: [0] }, false))");
-debug_snapshot!(autobatch_from(false,None, [doc_imp(ReplaceDocuments, true, None), doc_imp( ReplaceDocuments, true , None), doc_imp(ReplaceDocuments, true , None)]), @"Some((DocumentOperation { allow_index_creation: true, primary_key: None, operation_ids: [0, 1, 2] }, true, None))");
+debug_snapshot!(autobatch_from(false,None, [doc_imp(ReplaceDocuments, true, None), doc_imp( ReplaceDocuments, true , None), doc_imp(ReplaceDocuments, true , None)]), @"Some((DocumentOperation { allow_index_creation: true, primary_key: None, operation_ids: [0, 1, 2] }, true))");
-debug_snapshot!(autobatch_from(false,None, [doc_imp(ReplaceDocuments, false, None), doc_imp( ReplaceDocuments, true , None), doc_imp(ReplaceDocuments, true , None)]), @"Some((DocumentOperation { allow_index_creation: false, primary_key: None, operation_ids: [0] }, false, Some(IndexCreationMismatch { id: 1 })))");
+debug_snapshot!(autobatch_from(false,None, [doc_imp(ReplaceDocuments, false, None), doc_imp( ReplaceDocuments, true , None), doc_imp(ReplaceDocuments, true , None)]), @"Some((DocumentOperation { allow_index_creation: false, primary_key: None, operation_ids: [0] }, false))");

 // we can autobatch one or multiple `UpdateDocuments` together.
 // if the index exists.
-debug_snapshot!(autobatch_from(true, None, [doc_imp(UpdateDocuments, true, None)]), @"Some((DocumentOperation { allow_index_creation: true, primary_key: None, operation_ids: [0] }, true, None))");
+debug_snapshot!(autobatch_from(true, None, [doc_imp(UpdateDocuments, true, None)]), @"Some((DocumentOperation { allow_index_creation: true, primary_key: None, operation_ids: [0] }, true))");
-debug_snapshot!(autobatch_from(true, None, [doc_imp(UpdateDocuments, true, None), doc_imp(UpdateDocuments, true, None), doc_imp(UpdateDocuments, true, None)]), @"Some((DocumentOperation { allow_index_creation: true, primary_key: None, operation_ids: [0, 1, 2] }, true, None))");
+debug_snapshot!(autobatch_from(true, None, [doc_imp(UpdateDocuments, true, None), doc_imp(UpdateDocuments, true, None), doc_imp(UpdateDocuments, true, None)]), @"Some((DocumentOperation { allow_index_creation: true, primary_key: None, operation_ids: [0, 1, 2] }, true))");
-debug_snapshot!(autobatch_from(true, None, [doc_imp(UpdateDocuments, false, None)]), @"Some((DocumentOperation { allow_index_creation: false, primary_key: None, operation_ids: [0] }, false, None))");
+debug_snapshot!(autobatch_from(true, None, [doc_imp(UpdateDocuments, false, None)]), @"Some((DocumentOperation { allow_index_creation: false, primary_key: None, operation_ids: [0] }, false))");
-debug_snapshot!(autobatch_from(true, None, [doc_imp(UpdateDocuments, false, None), doc_imp(UpdateDocuments, false, None), doc_imp(UpdateDocuments, false, None)]), @"Some((DocumentOperation { allow_index_creation: false, primary_key: None, operation_ids: [0, 1, 2] }, false, None))");
+debug_snapshot!(autobatch_from(true, None, [doc_imp(UpdateDocuments, false, None), doc_imp(UpdateDocuments, false, None), doc_imp(UpdateDocuments, false, None)]), @"Some((DocumentOperation { allow_index_creation: false, primary_key: None, operation_ids: [0, 1, 2] }, false))");

 // if it doesn't exists.
-debug_snapshot!(autobatch_from(false,None, [doc_imp(UpdateDocuments, true, None)]), @"Some((DocumentOperation { allow_index_creation: true, primary_key: None, operation_ids: [0] }, true, None))");
+debug_snapshot!(autobatch_from(false,None, [doc_imp(UpdateDocuments, true, None)]), @"Some((DocumentOperation { allow_index_creation: true, primary_key: None, operation_ids: [0] }, true))");
-debug_snapshot!(autobatch_from(false,None, [doc_imp(UpdateDocuments, true, None), doc_imp(UpdateDocuments, true, None), doc_imp(UpdateDocuments, true, None)]), @"Some((DocumentOperation { allow_index_creation: true, primary_key: None, operation_ids: [0, 1, 2] }, true, None))");
+debug_snapshot!(autobatch_from(false,None, [doc_imp(UpdateDocuments, true, None), doc_imp(UpdateDocuments, true, None), doc_imp(UpdateDocuments, true, None)]), @"Some((DocumentOperation { allow_index_creation: true, primary_key: None, operation_ids: [0, 1, 2] }, true))");
-debug_snapshot!(autobatch_from(false,None, [doc_imp(UpdateDocuments, false, None)]), @"Some((DocumentOperation { allow_index_creation: false, primary_key: None, operation_ids: [0] }, false, None))");
+debug_snapshot!(autobatch_from(false,None, [doc_imp(UpdateDocuments, false, None)]), @"Some((DocumentOperation { allow_index_creation: false, primary_key: None, operation_ids: [0] }, false))");
-debug_snapshot!(autobatch_from(false,None, [doc_imp(UpdateDocuments, false, None), doc_imp(UpdateDocuments, false, None), doc_imp(UpdateDocuments, false, None)]), @"Some((DocumentOperation { allow_index_creation: false, primary_key: None, operation_ids: [0, 1, 2] }, false, None))");
+debug_snapshot!(autobatch_from(false,None, [doc_imp(UpdateDocuments, false, None), doc_imp(UpdateDocuments, false, None), doc_imp(UpdateDocuments, false, None)]), @"Some((DocumentOperation { allow_index_creation: false, primary_key: None, operation_ids: [0, 1, 2] }, false))");

 // we can autobatch one or multiple DocumentDeletion together
-debug_snapshot!(autobatch_from(true, None, [doc_del()]), @"Some((DocumentDeletion { deletion_ids: [0], includes_by_filter: false }, false, None))");
+debug_snapshot!(autobatch_from(true, None, [doc_del()]), @"Some((DocumentDeletion { deletion_ids: [0], includes_by_filter: false }, false))");
-debug_snapshot!(autobatch_from(true, None, [doc_del(), doc_del(), doc_del()]), @"Some((DocumentDeletion { deletion_ids: [0, 1, 2], includes_by_filter: false }, false, None))");
+debug_snapshot!(autobatch_from(true, None, [doc_del(), doc_del(), doc_del()]), @"Some((DocumentDeletion { deletion_ids: [0, 1, 2], includes_by_filter: false }, false))");
-debug_snapshot!(autobatch_from(false,None, [doc_del()]), @"Some((DocumentDeletion { deletion_ids: [0], includes_by_filter: false }, false, None))");
+debug_snapshot!(autobatch_from(false,None, [doc_del()]), @"Some((DocumentDeletion { deletion_ids: [0], includes_by_filter: false }, false))");
-debug_snapshot!(autobatch_from(false,None, [doc_del(), doc_del(), doc_del()]), @"Some((DocumentDeletion { deletion_ids: [0, 1, 2], includes_by_filter: false }, false, None))");
+debug_snapshot!(autobatch_from(false,None, [doc_del(), doc_del(), doc_del()]), @"Some((DocumentDeletion { deletion_ids: [0, 1, 2], includes_by_filter: false }, false))");

 // we can autobatch one or multiple DocumentDeletionByFilter together
-debug_snapshot!(autobatch_from(true, None, [doc_del_fil()]), @"Some((DocumentDeletion { deletion_ids: [0], includes_by_filter: true }, false, None))");
+debug_snapshot!(autobatch_from(true, None, [doc_del_fil()]), @"Some((DocumentDeletion { deletion_ids: [0], includes_by_filter: true }, false))");
-debug_snapshot!(autobatch_from(true, None, [doc_del_fil(), doc_del_fil(), doc_del_fil()]), @"Some((DocumentDeletion { deletion_ids: [0, 1, 2], includes_by_filter: true }, false, None))");
+debug_snapshot!(autobatch_from(true, None, [doc_del_fil(), doc_del_fil(), doc_del_fil()]), @"Some((DocumentDeletion { deletion_ids: [0, 1, 2], includes_by_filter: true }, false))");
-debug_snapshot!(autobatch_from(false,None, [doc_del_fil()]), @"Some((DocumentDeletion { deletion_ids: [0], includes_by_filter: true }, false, None))");
+debug_snapshot!(autobatch_from(false,None, [doc_del_fil()]), @"Some((DocumentDeletion { deletion_ids: [0], includes_by_filter: true }, false))");
-debug_snapshot!(autobatch_from(false,None, [doc_del_fil(), doc_del_fil(), doc_del_fil()]), @"Some((DocumentDeletion { deletion_ids: [0, 1, 2], includes_by_filter: true }, false, None))");
+debug_snapshot!(autobatch_from(false,None, [doc_del_fil(), doc_del_fil(), doc_del_fil()]), @"Some((DocumentDeletion { deletion_ids: [0, 1, 2], includes_by_filter: true }, false))");

 // we can autobatch one or multiple Settings together
-debug_snapshot!(autobatch_from(true, None, [settings(true)]), @"Some((Settings { allow_index_creation: true, settings_ids: [0] }, true, None))");
+debug_snapshot!(autobatch_from(true, None, [settings(true)]), @"Some((Settings { allow_index_creation: true, settings_ids: [0] }, true))");
-debug_snapshot!(autobatch_from(true, None, [settings(true), settings(true), settings(true)]), @"Some((Settings { allow_index_creation: true, settings_ids: [0, 1, 2] }, true, None))");
+debug_snapshot!(autobatch_from(true, None, [settings(true), settings(true), settings(true)]), @"Some((Settings { allow_index_creation: true, settings_ids: [0, 1, 2] }, true))");
-debug_snapshot!(autobatch_from(true, None, [settings(false)]), @"Some((Settings { allow_index_creation: false, settings_ids: [0] }, false, None))");
+debug_snapshot!(autobatch_from(true, None, [settings(false)]), @"Some((Settings { allow_index_creation: false, settings_ids: [0] }, false))");
-debug_snapshot!(autobatch_from(true, None, [settings(false), settings(false), settings(false)]), @"Some((Settings { allow_index_creation: false, settings_ids: [0, 1, 2] }, false, None))");
+debug_snapshot!(autobatch_from(true, None, [settings(false), settings(false), settings(false)]), @"Some((Settings { allow_index_creation: false, settings_ids: [0, 1, 2] }, false))");

-debug_snapshot!(autobatch_from(false,None, [settings(true)]), @"Some((Settings { allow_index_creation: true, settings_ids: [0] }, true, None))");
+debug_snapshot!(autobatch_from(false,None, [settings(true)]), @"Some((Settings { allow_index_creation: true, settings_ids: [0] }, true))");
-debug_snapshot!(autobatch_from(false,None, [settings(true), settings(true), settings(true)]), @"Some((Settings { allow_index_creation: true, settings_ids: [0, 1, 2] }, true, None))");
+debug_snapshot!(autobatch_from(false,None, [settings(true), settings(true), settings(true)]), @"Some((Settings { allow_index_creation: true, settings_ids: [0, 1, 2] }, true))");
-debug_snapshot!(autobatch_from(false,None, [settings(false)]), @"Some((Settings { allow_index_creation: false, settings_ids: [0] }, false, None))");
+debug_snapshot!(autobatch_from(false,None, [settings(false)]), @"Some((Settings { allow_index_creation: false, settings_ids: [0] }, false))");
-debug_snapshot!(autobatch_from(false,None, [settings(false), settings(false), settings(false)]), @"Some((Settings { allow_index_creation: false, settings_ids: [0, 1, 2] }, false, None))");
+debug_snapshot!(autobatch_from(false,None, [settings(false), settings(false), settings(false)]), @"Some((Settings { allow_index_creation: false, settings_ids: [0, 1, 2] }, false))");

 // We can autobatch document addition with document deletion
-debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, true, None), doc_del()]), @"Some((DocumentOperation { allow_index_creation: true, primary_key: None, operation_ids: [0, 1] }, true, None))");
+debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, true, None), doc_del()]), @"Some((DocumentOperation { allow_index_creation: true, primary_key: None, operation_ids: [0, 1] }, true))");
-debug_snapshot!(autobatch_from(true, None, [doc_imp(UpdateDocuments, true, None), doc_del()]), @"Some((DocumentOperation { allow_index_creation: true, primary_key: None, operation_ids: [0, 1] }, true, None))");
+debug_snapshot!(autobatch_from(true, None, [doc_imp(UpdateDocuments, true, None), doc_del()]), @"Some((DocumentOperation { allow_index_creation: true, primary_key: None, operation_ids: [0, 1] }, true))");
-debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, false, None), doc_del()]), @"Some((DocumentOperation { allow_index_creation: false, primary_key: None, operation_ids: [0, 1] }, false, None))");
+debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, false, None), doc_del()]), @"Some((DocumentOperation { allow_index_creation: false, primary_key: None, operation_ids: [0, 1] }, false))");
-debug_snapshot!(autobatch_from(true, None, [doc_imp(UpdateDocuments, false, None), doc_del()]), @"Some((DocumentOperation { allow_index_creation: false, primary_key: None, operation_ids: [0, 1] }, false, None))");
+debug_snapshot!(autobatch_from(true, None, [doc_imp(UpdateDocuments, false, None), doc_del()]), @"Some((DocumentOperation { allow_index_creation: false, primary_key: None, operation_ids: [0, 1] }, false))");
-debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, true, Some("catto")), doc_del()]), @r###"Some((DocumentOperation { allow_index_creation: true, primary_key: Some("catto"), operation_ids: [0, 1] }, true, None))"###);
+debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, true, Some("catto")), doc_del()]), @r###"Some((DocumentOperation { allow_index_creation: true, primary_key: Some("catto"), operation_ids: [0, 1] }, true))"###);
-debug_snapshot!(autobatch_from(true, None, [doc_imp(UpdateDocuments, true, Some("catto")), doc_del()]), @r###"Some((DocumentOperation { allow_index_creation: true, primary_key: Some("catto"), operation_ids: [0, 1] }, true, None))"###);
+debug_snapshot!(autobatch_from(true, None, [doc_imp(UpdateDocuments, true, Some("catto")), doc_del()]), @r###"Some((DocumentOperation { allow_index_creation: true, primary_key: Some("catto"), operation_ids: [0, 1] }, true))"###);
-debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, false, Some("catto")), doc_del()]), @r###"Some((DocumentOperation { allow_index_creation: false, primary_key: Some("catto"), operation_ids: [0, 1] }, false, None))"###);
+debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, false, Some("catto")), doc_del()]), @r###"Some((DocumentOperation { allow_index_creation: false, primary_key: Some("catto"), operation_ids: [0, 1] }, false))"###);
-debug_snapshot!(autobatch_from(true, None, [doc_imp(UpdateDocuments, false, Some("catto")), doc_del()]), @r###"Some((DocumentOperation { allow_index_creation: false, primary_key: Some("catto"), operation_ids: [0, 1] }, false, None))"###);
+debug_snapshot!(autobatch_from(true, None, [doc_imp(UpdateDocuments, false, Some("catto")), doc_del()]), @r###"Some((DocumentOperation { allow_index_creation: false, primary_key: Some("catto"), operation_ids: [0, 1] }, false))"###);
-debug_snapshot!(autobatch_from(false, None, [doc_imp(ReplaceDocuments, true, None), doc_del()]), @"Some((DocumentOperation { allow_index_creation: true, primary_key: None, operation_ids: [0, 1] }, true, None))");
+debug_snapshot!(autobatch_from(false, None, [doc_imp(ReplaceDocuments, true, None), doc_del()]), @"Some((DocumentOperation { allow_index_creation: true, primary_key: None, operation_ids: [0, 1] }, true))");
-debug_snapshot!(autobatch_from(false, None, [doc_imp(UpdateDocuments, true, None), doc_del()]), @"Some((DocumentOperation { allow_index_creation: true, primary_key: None, operation_ids: [0, 1] }, true, None))");
+debug_snapshot!(autobatch_from(false, None, [doc_imp(UpdateDocuments, true, None), doc_del()]), @"Some((DocumentOperation { allow_index_creation: true, primary_key: None, operation_ids: [0, 1] }, true))");
-debug_snapshot!(autobatch_from(false, None, [doc_imp(ReplaceDocuments, false, None), doc_del()]), @"Some((DocumentOperation { allow_index_creation: false, primary_key: None, operation_ids: [0, 1] }, false, None))");
+debug_snapshot!(autobatch_from(false, None, [doc_imp(ReplaceDocuments, false, None), doc_del()]), @"Some((DocumentOperation { allow_index_creation: false, primary_key: None, operation_ids: [0, 1] }, false))");
-debug_snapshot!(autobatch_from(false, None, [doc_imp(UpdateDocuments, false, None), doc_del()]), @"Some((DocumentOperation { allow_index_creation: false, primary_key: None, operation_ids: [0, 1] }, false, None))");
+debug_snapshot!(autobatch_from(false, None, [doc_imp(UpdateDocuments, false, None), doc_del()]), @"Some((DocumentOperation { allow_index_creation: false, primary_key: None, operation_ids: [0, 1] }, false))");
-debug_snapshot!(autobatch_from(false, None, [doc_imp(ReplaceDocuments, true, Some("catto")), doc_del()]), @r###"Some((DocumentOperation { allow_index_creation: true, primary_key: Some("catto"), operation_ids: [0, 1] }, true, None))"###);
+debug_snapshot!(autobatch_from(false, None, [doc_imp(ReplaceDocuments, true, Some("catto")), doc_del()]), @r###"Some((DocumentOperation { allow_index_creation: true, primary_key: Some("catto"), operation_ids: [0, 1] }, true))"###);
-debug_snapshot!(autobatch_from(false, None, [doc_imp(UpdateDocuments, true, Some("catto")), doc_del()]), @r###"Some((DocumentOperation { allow_index_creation: true, primary_key: Some("catto"), operation_ids: [0, 1] }, true, None))"###);
+debug_snapshot!(autobatch_from(false, None, [doc_imp(UpdateDocuments, true, Some("catto")), doc_del()]), @r###"Some((DocumentOperation { allow_index_creation: true, primary_key: Some("catto"), operation_ids: [0, 1] }, true))"###);
-debug_snapshot!(autobatch_from(false, None, [doc_imp(ReplaceDocuments, false, Some("catto")), doc_del()]), @r###"Some((DocumentOperation { allow_index_creation: false, primary_key: Some("catto"), operation_ids: [0, 1] }, false, None))"###);
+debug_snapshot!(autobatch_from(false, None, [doc_imp(ReplaceDocuments, false, Some("catto")), doc_del()]), @r###"Some((DocumentOperation { allow_index_creation: false, primary_key: Some("catto"), operation_ids: [0, 1] }, false))"###);
-debug_snapshot!(autobatch_from(false, None, [doc_imp(UpdateDocuments, false, Some("catto")), doc_del()]), @r###"Some((DocumentOperation { allow_index_creation: false, primary_key: Some("catto"), operation_ids: [0, 1] }, false, None))"###);
+debug_snapshot!(autobatch_from(false, None, [doc_imp(UpdateDocuments, false, Some("catto")), doc_del()]), @r###"Some((DocumentOperation { allow_index_creation: false, primary_key: Some("catto"), operation_ids: [0, 1] }, false))"###);
 // And the other way around
-debug_snapshot!(autobatch_from(true, None, [doc_del(), doc_imp(ReplaceDocuments, true, None)]), @"Some((DocumentOperation { allow_index_creation: true, primary_key: None, operation_ids: [0, 1] }, false, None))");
+debug_snapshot!(autobatch_from(true, None, [doc_del(), doc_imp(ReplaceDocuments, true, None)]), @"Some((DocumentOperation { allow_index_creation: true, primary_key: None, operation_ids: [0, 1] }, false))");
-debug_snapshot!(autobatch_from(true, None, [doc_del(), doc_imp(UpdateDocuments, true, None)]), @"Some((DocumentOperation { allow_index_creation: true, primary_key: None, operation_ids: [0, 1] }, false, None))");
+debug_snapshot!(autobatch_from(true, None, [doc_del(), doc_imp(UpdateDocuments, true, None)]), @"Some((DocumentOperation { allow_index_creation: true, primary_key: None, operation_ids: [0, 1] }, false))");
-debug_snapshot!(autobatch_from(true, None, [doc_del(), doc_imp(ReplaceDocuments, false, None)]), @"Some((DocumentOperation { allow_index_creation: false, primary_key: None, operation_ids: [0, 1] }, false, None))");
+debug_snapshot!(autobatch_from(true, None, [doc_del(), doc_imp(ReplaceDocuments, false, None)]), @"Some((DocumentOperation { allow_index_creation: false, primary_key: None, operation_ids: [0, 1] }, false))");
-debug_snapshot!(autobatch_from(true, None, [doc_del(), doc_imp(UpdateDocuments, false, None)]), @"Some((DocumentOperation { allow_index_creation: false, primary_key: None, operation_ids: [0, 1] }, false, None))");
+debug_snapshot!(autobatch_from(true, None, [doc_del(), doc_imp(UpdateDocuments, false, None)]), @"Some((DocumentOperation { allow_index_creation: false, primary_key: None, operation_ids: [0, 1] }, false))");
-debug_snapshot!(autobatch_from(true, None, [doc_del(), doc_imp(ReplaceDocuments, true, Some("catto"))]), @r###"Some((DocumentOperation { allow_index_creation: true, primary_key: Some("catto"), operation_ids: [0, 1] }, false, None))"###);
+debug_snapshot!(autobatch_from(true, None, [doc_del(), doc_imp(ReplaceDocuments, true, Some("catto"))]), @r###"Some((DocumentOperation { allow_index_creation: true, primary_key: Some("catto"), operation_ids: [0, 1] }, false))"###);
-debug_snapshot!(autobatch_from(true, None, [doc_del(), doc_imp(UpdateDocuments, true, Some("catto"))]), @r###"Some((DocumentOperation { allow_index_creation: true, primary_key: Some("catto"), operation_ids: [0, 1] }, false, None))"###);
+debug_snapshot!(autobatch_from(true, None, [doc_del(), doc_imp(UpdateDocuments, true, Some("catto"))]), @r###"Some((DocumentOperation { allow_index_creation: true, primary_key: Some("catto"), operation_ids: [0, 1] }, false))"###);
-debug_snapshot!(autobatch_from(true, None, [doc_del(), doc_imp(ReplaceDocuments, false, Some("catto"))]), @r###"Some((DocumentOperation { allow_index_creation: false, primary_key: Some("catto"), operation_ids: [0, 1] }, false, None))"###);
+debug_snapshot!(autobatch_from(true, None, [doc_del(), doc_imp(ReplaceDocuments, false, Some("catto"))]), @r###"Some((DocumentOperation { allow_index_creation: false, primary_key: Some("catto"), operation_ids: [0, 1] }, false))"###);
-debug_snapshot!(autobatch_from(true, None, [doc_del(), doc_imp(UpdateDocuments, false, Some("catto"))]), @r###"Some((DocumentOperation { allow_index_creation: false, primary_key: Some("catto"), operation_ids: [0, 1] }, false, None))"###);
+debug_snapshot!(autobatch_from(true, None, [doc_del(), doc_imp(UpdateDocuments, false, Some("catto"))]), @r###"Some((DocumentOperation { allow_index_creation: false, primary_key: Some("catto"), operation_ids: [0, 1] }, false))"###);
-debug_snapshot!(autobatch_from(false, None, [doc_del(), doc_imp(ReplaceDocuments, false, None)]), @"Some((DocumentOperation { allow_index_creation: false, primary_key: None, operation_ids: [0, 1] }, false, None))");
+debug_snapshot!(autobatch_from(false, None, [doc_del(), doc_imp(ReplaceDocuments, false, None)]), @"Some((DocumentOperation { allow_index_creation: false, primary_key: None, operation_ids: [0, 1] }, false))");
-debug_snapshot!(autobatch_from(false, None, [doc_del(), doc_imp(UpdateDocuments, false, None)]), @"Some((DocumentOperation { allow_index_creation: false, primary_key: None, operation_ids: [0, 1] }, false, None))");
+debug_snapshot!(autobatch_from(false, None, [doc_del(), doc_imp(UpdateDocuments, false, None)]), @"Some((DocumentOperation { allow_index_creation: false, primary_key: None, operation_ids: [0, 1] }, false))");
-debug_snapshot!(autobatch_from(false, None, [doc_del(), doc_imp(ReplaceDocuments, false, Some("catto"))]), @r###"Some((DocumentOperation { allow_index_creation: false, primary_key: Some("catto"), operation_ids: [0, 1] }, false, None))"###);
+debug_snapshot!(autobatch_from(false, None, [doc_del(), doc_imp(ReplaceDocuments, false, Some("catto"))]), @r###"Some((DocumentOperation { allow_index_creation: false, primary_key: Some("catto"), operation_ids: [0, 1] }, false))"###);
-debug_snapshot!(autobatch_from(false, None, [doc_del(), doc_imp(UpdateDocuments, false, Some("catto"))]), @r###"Some((DocumentOperation { allow_index_creation: false, primary_key: Some("catto"), operation_ids: [0, 1] }, false, None))"###);
+debug_snapshot!(autobatch_from(false, None, [doc_del(), doc_imp(UpdateDocuments, false, Some("catto"))]), @r###"Some((DocumentOperation { allow_index_creation: false, primary_key: Some("catto"), operation_ids: [0, 1] }, false))"###);

 // But we can't autobatch document addition with document deletion by filter
-debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, true, None), doc_del_fil()]), @"Some((DocumentOperation { allow_index_creation: true, primary_key: None, operation_ids: [0] }, true, Some(DocumentOperationWithDeletionByFilter { id: 1 })))");
+debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, true, None), doc_del_fil()]), @"Some((DocumentOperation { allow_index_creation: true, primary_key: None, operation_ids: [0] }, true))");
-debug_snapshot!(autobatch_from(true, None, [doc_imp(UpdateDocuments, true, None), doc_del_fil()]), @"Some((DocumentOperation { allow_index_creation: true, primary_key: None, operation_ids: [0] }, true, Some(DocumentOperationWithDeletionByFilter { id: 1 })))");
+debug_snapshot!(autobatch_from(true, None, [doc_imp(UpdateDocuments, true, None), doc_del_fil()]), @"Some((DocumentOperation { allow_index_creation: true, primary_key: None, operation_ids: [0] }, true))");
-debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, false, None), doc_del_fil()]), @"Some((DocumentOperation { allow_index_creation: false, primary_key: None, operation_ids: [0] }, false, Some(DocumentOperationWithDeletionByFilter { id: 1 })))");
+debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, false, None), doc_del_fil()]), @"Some((DocumentOperation { allow_index_creation: false, primary_key: None, operation_ids: [0] }, false))");
-debug_snapshot!(autobatch_from(true, None, [doc_imp(UpdateDocuments, false, None), doc_del_fil()]), @"Some((DocumentOperation { allow_index_creation: false, primary_key: None, operation_ids: [0] }, false, Some(DocumentOperationWithDeletionByFilter { id: 1 })))");
+debug_snapshot!(autobatch_from(true, None, [doc_imp(UpdateDocuments, false, None), doc_del_fil()]), @"Some((DocumentOperation { allow_index_creation: false, primary_key: None, operation_ids: [0] }, false))");
-debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, true, Some("catto")), doc_del_fil()]), @r###"Some((DocumentOperation { allow_index_creation: true, primary_key: Some("catto"), operation_ids: [0] }, true, Some(DocumentOperationWithDeletionByFilter { id: 1 })))"###);
+debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, true, Some("catto")), doc_del_fil()]), @r###"Some((DocumentOperation { allow_index_creation: true, primary_key: Some("catto"), operation_ids: [0] }, true))"###);
-debug_snapshot!(autobatch_from(true, None, [doc_imp(UpdateDocuments, true, Some("catto")), doc_del_fil()]), @r###"Some((DocumentOperation { allow_index_creation: true, primary_key: Some("catto"), operation_ids: [0] }, true, Some(DocumentOperationWithDeletionByFilter { id: 1 })))"###);
+debug_snapshot!(autobatch_from(true, None, [doc_imp(UpdateDocuments, true, Some("catto")), doc_del_fil()]), @r###"Some((DocumentOperation { allow_index_creation: true, primary_key: Some("catto"), operation_ids: [0] }, true))"###);
-debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, false, Some("catto")), doc_del_fil()]), @r###"Some((DocumentOperation { allow_index_creation: false, primary_key: Some("catto"), operation_ids: [0] }, false, Some(DocumentOperationWithDeletionByFilter { id: 1 })))"###);
+debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, false, Some("catto")), doc_del_fil()]), @r###"Some((DocumentOperation { allow_index_creation: false, primary_key: Some("catto"), operation_ids: [0] }, false))"###);
-debug_snapshot!(autobatch_from(true, None, [doc_imp(UpdateDocuments, false, Some("catto")), doc_del_fil()]), @r###"Some((DocumentOperation { allow_index_creation: false, primary_key: Some("catto"), operation_ids: [0] }, false, Some(DocumentOperationWithDeletionByFilter { id: 1 })))"###);
+debug_snapshot!(autobatch_from(true, None, [doc_imp(UpdateDocuments, false, Some("catto")), doc_del_fil()]), @r###"Some((DocumentOperation { allow_index_creation: false, primary_key: Some("catto"), operation_ids: [0] }, false))"###);
-debug_snapshot!(autobatch_from(false, None, [doc_imp(ReplaceDocuments, true, None), doc_del_fil()]), @"Some((DocumentOperation { allow_index_creation: true, primary_key: None, operation_ids: [0] }, true, Some(DocumentOperationWithDeletionByFilter { id: 1 })))");
+debug_snapshot!(autobatch_from(false, None, [doc_imp(ReplaceDocuments, true, None), doc_del_fil()]), @"Some((DocumentOperation { allow_index_creation: true, primary_key: None, operation_ids: [0] }, true))");
-debug_snapshot!(autobatch_from(false, None, [doc_imp(UpdateDocuments, true, None), doc_del_fil()]), @"Some((DocumentOperation { allow_index_creation: true, primary_key: None, operation_ids: [0] }, true, Some(DocumentOperationWithDeletionByFilter { id: 1 })))");
+debug_snapshot!(autobatch_from(false, None, [doc_imp(UpdateDocuments, true, None), doc_del_fil()]), @"Some((DocumentOperation { allow_index_creation: true, primary_key: None, operation_ids: [0] }, true))");
-debug_snapshot!(autobatch_from(false, None, [doc_imp(ReplaceDocuments, false, None), doc_del_fil()]), @"Some((DocumentOperation { allow_index_creation: false, primary_key: None, operation_ids: [0] }, false, Some(DocumentOperationWithDeletionByFilter { id: 1 })))");
+debug_snapshot!(autobatch_from(false, None, [doc_imp(ReplaceDocuments, false, None), doc_del_fil()]), @"Some((DocumentOperation { allow_index_creation: false, primary_key: None, operation_ids: [0] }, false))");
-debug_snapshot!(autobatch_from(false, None, [doc_imp(UpdateDocuments, false, None), doc_del_fil()]), @"Some((DocumentOperation { allow_index_creation: false, primary_key: None, operation_ids: [0] }, false, Some(DocumentOperationWithDeletionByFilter { id: 1 })))");
+debug_snapshot!(autobatch_from(false, None, [doc_imp(UpdateDocuments, false, None), doc_del_fil()]), @"Some((DocumentOperation { allow_index_creation: false, primary_key: None, operation_ids: [0] }, false))");
-debug_snapshot!(autobatch_from(false, None, [doc_imp(ReplaceDocuments, true, Some("catto")), doc_del_fil()]), @r###"Some((DocumentOperation { allow_index_creation: true, primary_key: Some("catto"), operation_ids: [0] }, true, Some(DocumentOperationWithDeletionByFilter { id: 1 })))"###);
+debug_snapshot!(autobatch_from(false, None, [doc_imp(ReplaceDocuments, true, Some("catto")), doc_del_fil()]), @r###"Some((DocumentOperation { allow_index_creation: true, primary_key: Some("catto"), operation_ids: [0] }, true))"###);
-debug_snapshot!(autobatch_from(false, None, [doc_imp(UpdateDocuments, true, Some("catto")), doc_del_fil()]), @r###"Some((DocumentOperation { allow_index_creation: true, primary_key: Some("catto"), operation_ids: [0] }, true, Some(DocumentOperationWithDeletionByFilter { id: 1 })))"###);
+debug_snapshot!(autobatch_from(false, None, [doc_imp(UpdateDocuments, true, Some("catto")), doc_del_fil()]), @r###"Some((DocumentOperation { allow_index_creation: true, primary_key: Some("catto"), operation_ids: [0] }, true))"###);
-debug_snapshot!(autobatch_from(false, None, [doc_imp(ReplaceDocuments, false, Some("catto")), doc_del_fil()]), @r###"Some((DocumentOperation { allow_index_creation: false, primary_key: Some("catto"), operation_ids: [0] }, false, Some(DocumentOperationWithDeletionByFilter { id: 1 })))"###);
+debug_snapshot!(autobatch_from(false, None, [doc_imp(ReplaceDocuments, false, Some("catto")), doc_del_fil()]), @r###"Some((DocumentOperation { allow_index_creation: false, primary_key: Some("catto"), operation_ids: [0] }, false))"###);
-debug_snapshot!(autobatch_from(false, None, [doc_imp(UpdateDocuments, false, Some("catto")), doc_del_fil()]), @r###"Some((DocumentOperation { allow_index_creation: false, primary_key: Some("catto"), operation_ids: [0] }, false, Some(DocumentOperationWithDeletionByFilter { id: 1 })))"###);
+debug_snapshot!(autobatch_from(false, None, [doc_imp(UpdateDocuments, false, Some("catto")), doc_del_fil()]), @r###"Some((DocumentOperation { allow_index_creation: false, primary_key: Some("catto"), operation_ids: [0] }, false))"###);
 // And the other way around
-debug_snapshot!(autobatch_from(true, None, [doc_del_fil(), doc_imp(ReplaceDocuments, true, None)]), @"Some((DocumentDeletion { deletion_ids: [0], includes_by_filter: true }, false, Some(DeletionByFilterWithDocumentOperation { id: 1 })))");
+debug_snapshot!(autobatch_from(true, None, [doc_del_fil(), doc_imp(ReplaceDocuments, true, None)]), @"Some((DocumentDeletion { deletion_ids: [0], includes_by_filter: true }, false))");
-debug_snapshot!(autobatch_from(true, None, [doc_del_fil(), doc_imp(UpdateDocuments, true, None)]), @"Some((DocumentDeletion { deletion_ids: [0], includes_by_filter: true }, false, Some(DeletionByFilterWithDocumentOperation { id: 1 })))");
+debug_snapshot!(autobatch_from(true, None, [doc_del_fil(), doc_imp(UpdateDocuments, true, None)]), @"Some((DocumentDeletion { deletion_ids: [0], includes_by_filter: true }, false))");
-debug_snapshot!(autobatch_from(true, None, [doc_del_fil(), doc_imp(ReplaceDocuments, false, None)]), @"Some((DocumentDeletion { deletion_ids: [0], includes_by_filter: true }, false, Some(DeletionByFilterWithDocumentOperation { id: 1 })))");
+debug_snapshot!(autobatch_from(true, None, [doc_del_fil(), doc_imp(ReplaceDocuments, false, None)]), @"Some((DocumentDeletion { deletion_ids: [0], includes_by_filter: true }, false))");
-debug_snapshot!(autobatch_from(true, None, [doc_del_fil(), doc_imp(UpdateDocuments, false, None)]), @"Some((DocumentDeletion { deletion_ids: [0], includes_by_filter: true }, false, Some(DeletionByFilterWithDocumentOperation { id: 1 })))");
+debug_snapshot!(autobatch_from(true, None, [doc_del_fil(), doc_imp(UpdateDocuments, false, None)]), @"Some((DocumentDeletion { deletion_ids: [0], includes_by_filter: true }, false))");
-debug_snapshot!(autobatch_from(true, None, [doc_del_fil(), doc_imp(ReplaceDocuments, true, Some("catto"))]), @"Some((DocumentDeletion { deletion_ids: [0], includes_by_filter: true }, false, Some(DeletionByFilterWithDocumentOperation { id: 1 })))");
+debug_snapshot!(autobatch_from(true, None, [doc_del_fil(), doc_imp(ReplaceDocuments, true, Some("catto"))]), @"Some((DocumentDeletion { deletion_ids: [0], includes_by_filter: true }, false))");
-debug_snapshot!(autobatch_from(true, None, [doc_del_fil(), doc_imp(UpdateDocuments, true, Some("catto"))]), @"Some((DocumentDeletion { deletion_ids: [0], includes_by_filter: true }, false, Some(DeletionByFilterWithDocumentOperation { id: 1 })))");
+debug_snapshot!(autobatch_from(true, None, [doc_del_fil(), doc_imp(UpdateDocuments, true, Some("catto"))]), @"Some((DocumentDeletion { deletion_ids: [0], includes_by_filter: true }, false))");
-debug_snapshot!(autobatch_from(true, None, [doc_del_fil(), doc_imp(ReplaceDocuments, false, Some("catto"))]), @"Some((DocumentDeletion { deletion_ids: [0], includes_by_filter: true }, false, Some(DeletionByFilterWithDocumentOperation { id: 1 })))");
+debug_snapshot!(autobatch_from(true, None, [doc_del_fil(), doc_imp(ReplaceDocuments, false, Some("catto"))]), @"Some((DocumentDeletion { deletion_ids: [0], includes_by_filter: true }, false))");
-debug_snapshot!(autobatch_from(true, None, [doc_del_fil(), doc_imp(UpdateDocuments, false, Some("catto"))]), @"Some((DocumentDeletion { deletion_ids: [0], includes_by_filter: true }, false, Some(DeletionByFilterWithDocumentOperation { id: 1 })))");
+debug_snapshot!(autobatch_from(true, None, [doc_del_fil(), doc_imp(UpdateDocuments, false, Some("catto"))]), @"Some((DocumentDeletion { deletion_ids: [0], includes_by_filter: true }, false))");
-debug_snapshot!(autobatch_from(false, None, [doc_del_fil(), doc_imp(ReplaceDocuments, false, None)]), @"Some((DocumentDeletion { deletion_ids: [0], includes_by_filter: true }, false, Some(DeletionByFilterWithDocumentOperation { id: 1 })))");
+debug_snapshot!(autobatch_from(false, None, [doc_del_fil(), doc_imp(ReplaceDocuments, false, None)]), @"Some((DocumentDeletion { deletion_ids: [0], includes_by_filter: true }, false))");
-debug_snapshot!(autobatch_from(false, None, [doc_del_fil(), doc_imp(UpdateDocuments, false, None)]), @"Some((DocumentDeletion { deletion_ids: [0], includes_by_filter: true }, false, Some(DeletionByFilterWithDocumentOperation { id: 1 })))");
+debug_snapshot!(autobatch_from(false, None, [doc_del_fil(), doc_imp(UpdateDocuments, false, None)]), @"Some((DocumentDeletion { deletion_ids: [0], includes_by_filter: true }, false))");
-debug_snapshot!(autobatch_from(false, None, [doc_del_fil(), doc_imp(ReplaceDocuments, false, Some("catto"))]), @"Some((DocumentDeletion { deletion_ids: [0], includes_by_filter: true }, false, Some(DeletionByFilterWithDocumentOperation { id: 1 })))");
+debug_snapshot!(autobatch_from(false, None, [doc_del_fil(), doc_imp(ReplaceDocuments, false, Some("catto"))]), @"Some((DocumentDeletion { deletion_ids: [0], includes_by_filter: true }, false))");
-debug_snapshot!(autobatch_from(false, None, [doc_del_fil(), doc_imp(UpdateDocuments, false, Some("catto"))]), @"Some((DocumentDeletion { deletion_ids: [0], includes_by_filter: true }, false, Some(DeletionByFilterWithDocumentOperation { id: 1 })))");
+debug_snapshot!(autobatch_from(false, None, [doc_del_fil(), doc_imp(UpdateDocuments, false, Some("catto"))]), @"Some((DocumentDeletion { deletion_ids: [0], includes_by_filter: true }, false))");
 }
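Every snapshot in this suite differs between the two branches only in that trailing element: `, None))` when nothing interrupted the batch, or `, Some(..)))` naming the task that closed it. Purely as an illustration (this helper exists on neither branch), the older two-element shape is the newer one with the reason dropped:

fn drop_stop_reason(
    result: Option<(BatchKind, bool, Option<BatchStopReason>)>,
) -> Option<(BatchKind, bool)> {
    // Keep the batch kind and the index-creation flag, discard the explanation.
    result.map(|(kind, must_create_index, _reason)| (kind, must_create_index))
}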

 #[test]
 fn simple_different_document_operations_autobatch_together() {
 // addition and updates with deletion by filter can't batch together
-debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, true, None), doc_imp(UpdateDocuments, true, None)]), @"Some((DocumentOperation { allow_index_creation: true, primary_key: None, operation_ids: [0, 1] }, true, None))");
+debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, true, None), doc_imp(UpdateDocuments, true, None)]), @"Some((DocumentOperation { allow_index_creation: true, primary_key: None, operation_ids: [0, 1] }, true))");
-debug_snapshot!(autobatch_from(true, None, [doc_imp(UpdateDocuments, true, None), doc_imp(ReplaceDocuments, true, None)]), @"Some((DocumentOperation { allow_index_creation: true, primary_key: None, operation_ids: [0, 1] }, true, None))");
+debug_snapshot!(autobatch_from(true, None, [doc_imp(UpdateDocuments, true, None), doc_imp(ReplaceDocuments, true, None)]), @"Some((DocumentOperation { allow_index_creation: true, primary_key: None, operation_ids: [0, 1] }, true))");
-debug_snapshot!(autobatch_from(true, None, [doc_imp(UpdateDocuments, true, None), doc_del_fil()]), @"Some((DocumentOperation { allow_index_creation: true, primary_key: None, operation_ids: [0] }, true, Some(DocumentOperationWithDeletionByFilter { id: 1 })))");
+debug_snapshot!(autobatch_from(true, None, [doc_imp(UpdateDocuments, true, None), doc_del_fil()]), @"Some((DocumentOperation { allow_index_creation: true, primary_key: None, operation_ids: [0] }, true))");
-debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, true, None), doc_del_fil()]), @"Some((DocumentOperation { allow_index_creation: true, primary_key: None, operation_ids: [0] }, true, Some(DocumentOperationWithDeletionByFilter { id: 1 })))");
+debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, true, None), doc_del_fil()]), @"Some((DocumentOperation { allow_index_creation: true, primary_key: None, operation_ids: [0] }, true))");
-debug_snapshot!(autobatch_from(true, None, [doc_del_fil(), doc_imp(UpdateDocuments, true, None)]), @"Some((DocumentDeletion { deletion_ids: [0], includes_by_filter: true }, false, Some(DeletionByFilterWithDocumentOperation { id: 1 })))");
+debug_snapshot!(autobatch_from(true, None, [doc_del_fil(), doc_imp(UpdateDocuments, true, None)]), @"Some((DocumentDeletion { deletion_ids: [0], includes_by_filter: true }, false))");
-debug_snapshot!(autobatch_from(true, None, [doc_del_fil(), doc_imp(ReplaceDocuments, true, None)]), @"Some((DocumentDeletion { deletion_ids: [0], includes_by_filter: true }, false, Some(DeletionByFilterWithDocumentOperation { id: 1 })))");
+debug_snapshot!(autobatch_from(true, None, [doc_del_fil(), doc_imp(ReplaceDocuments, true, None)]), @"Some((DocumentDeletion { deletion_ids: [0], includes_by_filter: true }, false))");

-debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, true, None), idx_create()]), @"Some((DocumentOperation { allow_index_creation: true, primary_key: None, operation_ids: [0] }, true, Some(TaskCannotBeBatched { kind: IndexCreation, id: 1 })))");
+debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, true, None), idx_create()]), @"Some((DocumentOperation { allow_index_creation: true, primary_key: None, operation_ids: [0] }, true))");
-debug_snapshot!(autobatch_from(true, None, [doc_imp(UpdateDocuments, true, None), idx_create()]), @"Some((DocumentOperation { allow_index_creation: true, primary_key: None, operation_ids: [0] }, true, Some(TaskCannotBeBatched { kind: IndexCreation, id: 1 })))");
+debug_snapshot!(autobatch_from(true, None, [doc_imp(UpdateDocuments, true, None), idx_create()]), @"Some((DocumentOperation { allow_index_creation: true, primary_key: None, operation_ids: [0] }, true))");
-debug_snapshot!(autobatch_from(true, None, [doc_del(), idx_create()]), @"Some((DocumentDeletion { deletion_ids: [0], includes_by_filter: false }, false, Some(TaskCannotBeBatched { kind: IndexCreation, id: 1 })))");
+debug_snapshot!(autobatch_from(true, None, [doc_del(), idx_create()]), @"Some((DocumentDeletion { deletion_ids: [0], includes_by_filter: false }, false))");
-debug_snapshot!(autobatch_from(true, None, [doc_del_fil(), idx_create()]), @"Some((DocumentDeletion { deletion_ids: [0], includes_by_filter: true }, false, Some(TaskCannotBeBatched { kind: IndexCreation, id: 1 })))");
+debug_snapshot!(autobatch_from(true, None, [doc_del_fil(), idx_create()]), @"Some((DocumentDeletion { deletion_ids: [0], includes_by_filter: true }, false))");

-debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, true, None), idx_update()]), @"Some((DocumentOperation { allow_index_creation: true, primary_key: None, operation_ids: [0] }, true, Some(TaskCannotBeBatched { kind: IndexUpdate, id: 1 })))");
+debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, true, None), idx_update()]), @"Some((DocumentOperation { allow_index_creation: true, primary_key: None, operation_ids: [0] }, true))");
-debug_snapshot!(autobatch_from(true, None, [doc_imp(UpdateDocuments, true, None), idx_update()]), @"Some((DocumentOperation { allow_index_creation: true, primary_key: None, operation_ids: [0] }, true, Some(TaskCannotBeBatched { kind: IndexUpdate, id: 1 })))");
+debug_snapshot!(autobatch_from(true, None, [doc_imp(UpdateDocuments, true, None), idx_update()]), @"Some((DocumentOperation { allow_index_creation: true, primary_key: None, operation_ids: [0] }, true))");
-debug_snapshot!(autobatch_from(true, None, [doc_del(), idx_update()]), @"Some((DocumentDeletion { deletion_ids: [0], includes_by_filter: false }, false, Some(TaskCannotBeBatched { kind: IndexUpdate, id: 1 })))");
+debug_snapshot!(autobatch_from(true, None, [doc_del(), idx_update()]), @"Some((DocumentDeletion { deletion_ids: [0], includes_by_filter: false }, false))");
-debug_snapshot!(autobatch_from(true, None, [doc_del_fil(), idx_update()]), @"Some((DocumentDeletion { deletion_ids: [0], includes_by_filter: true }, false, Some(TaskCannotBeBatched { kind: IndexUpdate, id: 1 })))");
+debug_snapshot!(autobatch_from(true, None, [doc_del_fil(), idx_update()]), @"Some((DocumentDeletion { deletion_ids: [0], includes_by_filter: true }, false))");
|
||||||
|
|
||||||
debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, true, None), idx_swap()]), @"Some((DocumentOperation { allow_index_creation: true, primary_key: None, operation_ids: [0] }, true, Some(TaskCannotBeBatched { kind: IndexSwap, id: 1 })))");
|
debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, true, None), idx_swap()]), @"Some((DocumentOperation { allow_index_creation: true, primary_key: None, operation_ids: [0] }, true))");
|
||||||
debug_snapshot!(autobatch_from(true, None, [doc_imp(UpdateDocuments, true, None), idx_swap()]), @"Some((DocumentOperation { allow_index_creation: true, primary_key: None, operation_ids: [0] }, true, Some(TaskCannotBeBatched { kind: IndexSwap, id: 1 })))");
|
debug_snapshot!(autobatch_from(true, None, [doc_imp(UpdateDocuments, true, None), idx_swap()]), @"Some((DocumentOperation { allow_index_creation: true, primary_key: None, operation_ids: [0] }, true))");
|
||||||
debug_snapshot!(autobatch_from(true, None, [doc_del(), idx_swap()]), @"Some((DocumentDeletion { deletion_ids: [0], includes_by_filter: false }, false, Some(TaskCannotBeBatched { kind: IndexSwap, id: 1 })))");
|
debug_snapshot!(autobatch_from(true, None, [doc_del(), idx_swap()]), @"Some((DocumentDeletion { deletion_ids: [0], includes_by_filter: false }, false))");
|
||||||
debug_snapshot!(autobatch_from(true, None, [doc_del_fil(), idx_swap()]), @"Some((DocumentDeletion { deletion_ids: [0], includes_by_filter: true }, false, Some(TaskCannotBeBatched { kind: IndexSwap, id: 1 })))");
|
debug_snapshot!(autobatch_from(true, None, [doc_del_fil(), idx_swap()]), @"Some((DocumentDeletion { deletion_ids: [0], includes_by_filter: true }, false))");
|
||||||
}

#[test]
fn document_addition_doesnt_batch_with_settings() {
// simple case
debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, true, None), settings(true)]), @"Some((DocumentOperation { allow_index_creation: true, primary_key: None, operation_ids: [0] }, true, Some(DocumentOperationWithSettings { id: 1 })))");
|
debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, true, None), settings(true)]), @"Some((DocumentOperation { allow_index_creation: true, primary_key: None, operation_ids: [0] }, true))");
|
||||||
debug_snapshot!(autobatch_from(true, None, [doc_imp(UpdateDocuments, true, None), settings(true)]), @"Some((DocumentOperation { allow_index_creation: true, primary_key: None, operation_ids: [0] }, true, Some(DocumentOperationWithSettings { id: 1 })))");
|
debug_snapshot!(autobatch_from(true, None, [doc_imp(UpdateDocuments, true, None), settings(true)]), @"Some((DocumentOperation { allow_index_creation: true, primary_key: None, operation_ids: [0] }, true))");
|
||||||
|
|
||||||
// multiple settings and doc addition
|
// multiple settings and doc addition
|
||||||
debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, true, None), doc_imp(ReplaceDocuments, true, None), settings(true), settings(true)]), @"Some((DocumentOperation { allow_index_creation: true, primary_key: None, operation_ids: [0, 1] }, true, Some(DocumentOperationWithSettings { id: 2 })))");
|
debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, true, None), doc_imp(ReplaceDocuments, true, None), settings(true), settings(true)]), @"Some((DocumentOperation { allow_index_creation: true, primary_key: None, operation_ids: [0, 1] }, true))");
|
||||||
debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, true, None), doc_imp(ReplaceDocuments, true, None), settings(true), settings(true)]), @"Some((DocumentOperation { allow_index_creation: true, primary_key: None, operation_ids: [0, 1] }, true, Some(DocumentOperationWithSettings { id: 2 })))");
|
debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, true, None), doc_imp(ReplaceDocuments, true, None), settings(true), settings(true)]), @"Some((DocumentOperation { allow_index_creation: true, primary_key: None, operation_ids: [0, 1] }, true))");
|
||||||
|
|
||||||
// addition and setting unordered
|
// addition and setting unordered
|
||||||
debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, true, None), settings(true), doc_imp(ReplaceDocuments, true, None), settings(true)]), @"Some((DocumentOperation { allow_index_creation: true, primary_key: None, operation_ids: [0] }, true, Some(DocumentOperationWithSettings { id: 1 })))");
|
debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, true, None), settings(true), doc_imp(ReplaceDocuments, true, None), settings(true)]), @"Some((DocumentOperation { allow_index_creation: true, primary_key: None, operation_ids: [0] }, true))");
|
||||||
debug_snapshot!(autobatch_from(true, None, [doc_imp(UpdateDocuments, true, None), settings(true), doc_imp(UpdateDocuments, true, None), settings(true)]), @"Some((DocumentOperation { allow_index_creation: true, primary_key: None, operation_ids: [0] }, true, Some(DocumentOperationWithSettings { id: 1 })))");
|
debug_snapshot!(autobatch_from(true, None, [doc_imp(UpdateDocuments, true, None), settings(true), doc_imp(UpdateDocuments, true, None), settings(true)]), @"Some((DocumentOperation { allow_index_creation: true, primary_key: None, operation_ids: [0] }, true))");
|
||||||
|
|
||||||
// Doesn't batch with other forbidden operations
|
// Doesn't batch with other forbidden operations
|
||||||
debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, true, None), settings(true), doc_imp(UpdateDocuments, true, None)]), @"Some((DocumentOperation { allow_index_creation: true, primary_key: None, operation_ids: [0] }, true, Some(DocumentOperationWithSettings { id: 1 })))");
|
debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, true, None), settings(true), doc_imp(UpdateDocuments, true, None)]), @"Some((DocumentOperation { allow_index_creation: true, primary_key: None, operation_ids: [0] }, true))");
|
||||||
debug_snapshot!(autobatch_from(true, None, [doc_imp(UpdateDocuments, true, None), settings(true), doc_imp(ReplaceDocuments, true, None)]), @"Some((DocumentOperation { allow_index_creation: true, primary_key: None, operation_ids: [0] }, true, Some(DocumentOperationWithSettings { id: 1 })))");
|
debug_snapshot!(autobatch_from(true, None, [doc_imp(UpdateDocuments, true, None), settings(true), doc_imp(ReplaceDocuments, true, None)]), @"Some((DocumentOperation { allow_index_creation: true, primary_key: None, operation_ids: [0] }, true))");
|
||||||
debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, true, None), settings(true), doc_del()]), @"Some((DocumentOperation { allow_index_creation: true, primary_key: None, operation_ids: [0] }, true, Some(DocumentOperationWithSettings { id: 1 })))");
|
debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, true, None), settings(true), doc_del()]), @"Some((DocumentOperation { allow_index_creation: true, primary_key: None, operation_ids: [0] }, true))");
|
||||||
debug_snapshot!(autobatch_from(true, None, [doc_imp(UpdateDocuments, true, None), settings(true), doc_del()]), @"Some((DocumentOperation { allow_index_creation: true, primary_key: None, operation_ids: [0] }, true, Some(DocumentOperationWithSettings { id: 1 })))");
|
debug_snapshot!(autobatch_from(true, None, [doc_imp(UpdateDocuments, true, None), settings(true), doc_del()]), @"Some((DocumentOperation { allow_index_creation: true, primary_key: None, operation_ids: [0] }, true))");
|
||||||
debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, true, None), settings(true), idx_create()]), @"Some((DocumentOperation { allow_index_creation: true, primary_key: None, operation_ids: [0] }, true, Some(DocumentOperationWithSettings { id: 1 })))");
|
debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, true, None), settings(true), idx_create()]), @"Some((DocumentOperation { allow_index_creation: true, primary_key: None, operation_ids: [0] }, true))");
|
||||||
debug_snapshot!(autobatch_from(true, None, [doc_imp(UpdateDocuments, true, None), settings(true), idx_create()]), @"Some((DocumentOperation { allow_index_creation: true, primary_key: None, operation_ids: [0] }, true, Some(DocumentOperationWithSettings { id: 1 })))");
|
debug_snapshot!(autobatch_from(true, None, [doc_imp(UpdateDocuments, true, None), settings(true), idx_create()]), @"Some((DocumentOperation { allow_index_creation: true, primary_key: None, operation_ids: [0] }, true))");
|
||||||
debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, true, None), settings(true), idx_update()]), @"Some((DocumentOperation { allow_index_creation: true, primary_key: None, operation_ids: [0] }, true, Some(DocumentOperationWithSettings { id: 1 })))");
|
debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, true, None), settings(true), idx_update()]), @"Some((DocumentOperation { allow_index_creation: true, primary_key: None, operation_ids: [0] }, true))");
|
||||||
debug_snapshot!(autobatch_from(true, None, [doc_imp(UpdateDocuments, true, None), settings(true), idx_update()]), @"Some((DocumentOperation { allow_index_creation: true, primary_key: None, operation_ids: [0] }, true, Some(DocumentOperationWithSettings { id: 1 })))");
|
debug_snapshot!(autobatch_from(true, None, [doc_imp(UpdateDocuments, true, None), settings(true), idx_update()]), @"Some((DocumentOperation { allow_index_creation: true, primary_key: None, operation_ids: [0] }, true))");
|
||||||
debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, true, None), settings(true), idx_swap()]), @"Some((DocumentOperation { allow_index_creation: true, primary_key: None, operation_ids: [0] }, true, Some(DocumentOperationWithSettings { id: 1 })))");
|
debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, true, None), settings(true), idx_swap()]), @"Some((DocumentOperation { allow_index_creation: true, primary_key: None, operation_ids: [0] }, true))");
|
||||||
debug_snapshot!(autobatch_from(true, None, [doc_imp(UpdateDocuments, true, None), settings(true), idx_swap()]), @"Some((DocumentOperation { allow_index_creation: true, primary_key: None, operation_ids: [0] }, true, Some(DocumentOperationWithSettings { id: 1 })))");
|
debug_snapshot!(autobatch_from(true, None, [doc_imp(UpdateDocuments, true, None), settings(true), idx_swap()]), @"Some((DocumentOperation { allow_index_creation: true, primary_key: None, operation_ids: [0] }, true))");
|
||||||
}

#[test]
fn clear_and_additions() {
// these two doesn't need to batch
debug_snapshot!(autobatch_from(true, None, [doc_clr(), doc_imp(ReplaceDocuments, true, None)]), @"Some((DocumentClear { ids: [0] }, false, Some(DocumentOperationWithSettings { id: 1 })))");
|
debug_snapshot!(autobatch_from(true, None, [doc_clr(), doc_imp(ReplaceDocuments, true, None)]), @"Some((DocumentClear { ids: [0] }, false))");
|
||||||
debug_snapshot!(autobatch_from(true, None, [doc_clr(), doc_imp(UpdateDocuments, true, None)]), @"Some((DocumentClear { ids: [0] }, false, Some(DocumentOperationWithSettings { id: 1 })))");
|
debug_snapshot!(autobatch_from(true, None, [doc_clr(), doc_imp(UpdateDocuments, true, None)]), @"Some((DocumentClear { ids: [0] }, false))");
|
||||||
|
|
||||||
// Basic use case
|
// Basic use case
|
||||||
debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, true, None), doc_imp(ReplaceDocuments, true, None), doc_clr()]), @"Some((DocumentClear { ids: [0, 1, 2] }, true, None))");
|
debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, true, None), doc_imp(ReplaceDocuments, true, None), doc_clr()]), @"Some((DocumentClear { ids: [0, 1, 2] }, true))");
|
||||||
debug_snapshot!(autobatch_from(true, None, [doc_imp(UpdateDocuments, true, None), doc_imp(UpdateDocuments, true, None), doc_clr()]), @"Some((DocumentClear { ids: [0, 1, 2] }, true, None))");
|
debug_snapshot!(autobatch_from(true, None, [doc_imp(UpdateDocuments, true, None), doc_imp(UpdateDocuments, true, None), doc_clr()]), @"Some((DocumentClear { ids: [0, 1, 2] }, true))");
|
||||||
|
|
||||||
// This batch kind doesn't mix with other document addition
|
// This batch kind doesn't mix with other document addition
|
||||||
debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, true, None), doc_imp(ReplaceDocuments, true, None), doc_clr(), doc_imp(ReplaceDocuments, true, None)]), @"Some((DocumentClear { ids: [0, 1, 2] }, true, Some(DocumentOperationWithSettings { id: 3 })))");
|
debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, true, None), doc_imp(ReplaceDocuments, true, None), doc_clr(), doc_imp(ReplaceDocuments, true, None)]), @"Some((DocumentClear { ids: [0, 1, 2] }, true))");
|
||||||
debug_snapshot!(autobatch_from(true, None, [doc_imp(UpdateDocuments, true, None), doc_imp(UpdateDocuments, true, None), doc_clr(), doc_imp(UpdateDocuments, true, None)]), @"Some((DocumentClear { ids: [0, 1, 2] }, true, Some(DocumentOperationWithSettings { id: 3 })))");
|
debug_snapshot!(autobatch_from(true, None, [doc_imp(UpdateDocuments, true, None), doc_imp(UpdateDocuments, true, None), doc_clr(), doc_imp(UpdateDocuments, true, None)]), @"Some((DocumentClear { ids: [0, 1, 2] }, true))");
|
||||||
|
|
||||||
// But you can batch multiple clear together
|
// But you can batch multiple clear together
|
||||||
debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, true, None), doc_imp(ReplaceDocuments, true, None), doc_clr(), doc_clr(), doc_clr()]), @"Some((DocumentClear { ids: [0, 1, 2, 3, 4] }, true, None))");
|
debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, true, None), doc_imp(ReplaceDocuments, true, None), doc_clr(), doc_clr(), doc_clr()]), @"Some((DocumentClear { ids: [0, 1, 2, 3, 4] }, true))");
|
||||||
debug_snapshot!(autobatch_from(true, None, [doc_imp(UpdateDocuments, true, None), doc_imp(UpdateDocuments, true, None), doc_clr(), doc_clr(), doc_clr()]), @"Some((DocumentClear { ids: [0, 1, 2, 3, 4] }, true, None))");
|
debug_snapshot!(autobatch_from(true, None, [doc_imp(UpdateDocuments, true, None), doc_imp(UpdateDocuments, true, None), doc_clr(), doc_clr(), doc_clr()]), @"Some((DocumentClear { ids: [0, 1, 2, 3, 4] }, true))");
|
||||||
}

#[test]
fn clear_and_additions_and_settings() {
// A clear don't need to autobatch the settings that happens AFTER there is no documents
debug_snapshot!(autobatch_from(true, None, [doc_clr(), settings(true)]), @"Some((DocumentClear { ids: [0] }, false, Some(DocumentOperationWithSettings { id: 1 })))");
|
debug_snapshot!(autobatch_from(true, None, [doc_clr(), settings(true)]), @"Some((DocumentClear { ids: [0] }, false))");
|
||||||
|
|
||||||
debug_snapshot!(autobatch_from(true, None, [settings(true), doc_clr(), settings(true)]), @"Some((ClearAndSettings { other: [1], allow_index_creation: true, settings_ids: [0, 2] }, true, None))");
|
debug_snapshot!(autobatch_from(true, None, [settings(true), doc_clr(), settings(true)]), @"Some((ClearAndSettings { other: [1], allow_index_creation: true, settings_ids: [0, 2] }, true))");
|
||||||
debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, true, None), settings(true), doc_clr()]), @"Some((DocumentOperation { allow_index_creation: true, primary_key: None, operation_ids: [0] }, true, Some(DocumentOperationWithSettings { id: 1 })))");
|
debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, true, None), settings(true), doc_clr()]), @"Some((DocumentOperation { allow_index_creation: true, primary_key: None, operation_ids: [0] }, true))");
|
||||||
debug_snapshot!(autobatch_from(true, None, [doc_imp(UpdateDocuments, true, None), settings(true), doc_clr()]), @"Some((DocumentOperation { allow_index_creation: true, primary_key: None, operation_ids: [0] }, true, Some(DocumentOperationWithSettings { id: 1 })))");
|
debug_snapshot!(autobatch_from(true, None, [doc_imp(UpdateDocuments, true, None), settings(true), doc_clr()]), @"Some((DocumentOperation { allow_index_creation: true, primary_key: None, operation_ids: [0] }, true))");
|
||||||
}

#[test]
fn anything_and_index_deletion() {
// The `IndexDeletion` doesn't batch with anything that happens AFTER.
debug_snapshot!(autobatch_from(true, None, [idx_del(), doc_imp(ReplaceDocuments, true, None)]), @"Some((IndexDeletion { ids: [0] }, false, Some(IndexDeletion { id: 0 })))");
|
debug_snapshot!(autobatch_from(true, None, [idx_del(), doc_imp(ReplaceDocuments, true, None)]), @"Some((IndexDeletion { ids: [0] }, false))");
|
||||||
debug_snapshot!(autobatch_from(true, None, [idx_del(), doc_imp(UpdateDocuments, true, None)]), @"Some((IndexDeletion { ids: [0] }, false, Some(IndexDeletion { id: 0 })))");
|
debug_snapshot!(autobatch_from(true, None, [idx_del(), doc_imp(UpdateDocuments, true, None)]), @"Some((IndexDeletion { ids: [0] }, false))");
|
||||||
debug_snapshot!(autobatch_from(true, None, [idx_del(), doc_imp(ReplaceDocuments, false, None)]), @"Some((IndexDeletion { ids: [0] }, false, Some(IndexDeletion { id: 0 })))");
|
debug_snapshot!(autobatch_from(true, None, [idx_del(), doc_imp(ReplaceDocuments, false, None)]), @"Some((IndexDeletion { ids: [0] }, false))");
|
||||||
debug_snapshot!(autobatch_from(true, None, [idx_del(), doc_imp(UpdateDocuments, false, None)]), @"Some((IndexDeletion { ids: [0] }, false, Some(IndexDeletion { id: 0 })))");
|
debug_snapshot!(autobatch_from(true, None, [idx_del(), doc_imp(UpdateDocuments, false, None)]), @"Some((IndexDeletion { ids: [0] }, false))");
|
||||||
debug_snapshot!(autobatch_from(true, None, [idx_del(), doc_del()]), @"Some((IndexDeletion { ids: [0] }, false, Some(IndexDeletion { id: 0 })))");
|
debug_snapshot!(autobatch_from(true, None, [idx_del(), doc_del()]), @"Some((IndexDeletion { ids: [0] }, false))");
|
||||||
debug_snapshot!(autobatch_from(true, None, [idx_del(), doc_del_fil()]), @"Some((IndexDeletion { ids: [0] }, false, Some(IndexDeletion { id: 0 })))");
|
debug_snapshot!(autobatch_from(true, None, [idx_del(), doc_del_fil()]), @"Some((IndexDeletion { ids: [0] }, false))");
|
||||||
debug_snapshot!(autobatch_from(true, None, [idx_del(), doc_clr()]), @"Some((IndexDeletion { ids: [0] }, false, Some(IndexDeletion { id: 0 })))");
|
debug_snapshot!(autobatch_from(true, None, [idx_del(), doc_clr()]), @"Some((IndexDeletion { ids: [0] }, false))");
|
||||||
debug_snapshot!(autobatch_from(true, None, [idx_del(), settings(true)]), @"Some((IndexDeletion { ids: [0] }, false, Some(IndexDeletion { id: 0 })))");
|
debug_snapshot!(autobatch_from(true, None, [idx_del(), settings(true)]), @"Some((IndexDeletion { ids: [0] }, false))");
|
||||||
debug_snapshot!(autobatch_from(true, None, [idx_del(), settings(false)]), @"Some((IndexDeletion { ids: [0] }, false, Some(IndexDeletion { id: 0 })))");
|
debug_snapshot!(autobatch_from(true, None, [idx_del(), settings(false)]), @"Some((IndexDeletion { ids: [0] }, false))");
|
||||||
|
|
||||||
debug_snapshot!(autobatch_from(false,None, [idx_del(), doc_imp(ReplaceDocuments, true, None)]), @"Some((IndexDeletion { ids: [0] }, false, Some(IndexDeletion { id: 0 })))");
|
debug_snapshot!(autobatch_from(false,None, [idx_del(), doc_imp(ReplaceDocuments, true, None)]), @"Some((IndexDeletion { ids: [0] }, false))");
|
||||||
debug_snapshot!(autobatch_from(false,None, [idx_del(), doc_imp(UpdateDocuments, true, None)]), @"Some((IndexDeletion { ids: [0] }, false, Some(IndexDeletion { id: 0 })))");
|
debug_snapshot!(autobatch_from(false,None, [idx_del(), doc_imp(UpdateDocuments, true, None)]), @"Some((IndexDeletion { ids: [0] }, false))");
|
||||||
debug_snapshot!(autobatch_from(false,None, [idx_del(), doc_imp(ReplaceDocuments, false, None)]), @"Some((IndexDeletion { ids: [0] }, false, Some(IndexDeletion { id: 0 })))");
|
debug_snapshot!(autobatch_from(false,None, [idx_del(), doc_imp(ReplaceDocuments, false, None)]), @"Some((IndexDeletion { ids: [0] }, false))");
|
||||||
debug_snapshot!(autobatch_from(false,None, [idx_del(), doc_imp(UpdateDocuments, false, None)]), @"Some((IndexDeletion { ids: [0] }, false, Some(IndexDeletion { id: 0 })))");
|
debug_snapshot!(autobatch_from(false,None, [idx_del(), doc_imp(UpdateDocuments, false, None)]), @"Some((IndexDeletion { ids: [0] }, false))");
|
||||||
debug_snapshot!(autobatch_from(false,None, [idx_del(), doc_del()]), @"Some((IndexDeletion { ids: [0] }, false, Some(IndexDeletion { id: 0 })))");
|
debug_snapshot!(autobatch_from(false,None, [idx_del(), doc_del()]), @"Some((IndexDeletion { ids: [0] }, false))");
|
||||||
debug_snapshot!(autobatch_from(false,None, [idx_del(), doc_del_fil()]), @"Some((IndexDeletion { ids: [0] }, false, Some(IndexDeletion { id: 0 })))");
|
debug_snapshot!(autobatch_from(false,None, [idx_del(), doc_del_fil()]), @"Some((IndexDeletion { ids: [0] }, false))");
|
||||||
debug_snapshot!(autobatch_from(false,None, [idx_del(), doc_clr()]), @"Some((IndexDeletion { ids: [0] }, false, Some(IndexDeletion { id: 0 })))");
|
debug_snapshot!(autobatch_from(false,None, [idx_del(), doc_clr()]), @"Some((IndexDeletion { ids: [0] }, false))");
|
||||||
debug_snapshot!(autobatch_from(false,None, [idx_del(), settings(true)]), @"Some((IndexDeletion { ids: [0] }, false, Some(IndexDeletion { id: 0 })))");
|
debug_snapshot!(autobatch_from(false,None, [idx_del(), settings(true)]), @"Some((IndexDeletion { ids: [0] }, false))");
|
||||||
debug_snapshot!(autobatch_from(false,None, [idx_del(), settings(false)]), @"Some((IndexDeletion { ids: [0] }, false, Some(IndexDeletion { id: 0 })))");
|
debug_snapshot!(autobatch_from(false,None, [idx_del(), settings(false)]), @"Some((IndexDeletion { ids: [0] }, false))");
|
||||||
|
|
||||||
// The index deletion can accept almost any type of `BatchKind` and transform it to an `IndexDeletion`.
|
// The index deletion can accept almost any type of `BatchKind` and transform it to an `IndexDeletion`.
|
||||||
// First, the basic cases
|
// First, the basic cases
|
||||||
debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, true, None), idx_del()]), @"Some((IndexDeletion { ids: [0, 1] }, true, Some(IndexDeletion { id: 1 })))");
|
debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, true, None), idx_del()]), @"Some((IndexDeletion { ids: [0, 1] }, true))");
|
||||||
debug_snapshot!(autobatch_from(true, None, [doc_imp(UpdateDocuments, true, None), idx_del()]), @"Some((IndexDeletion { ids: [0, 1] }, true, Some(IndexDeletion { id: 1 })))");
|
debug_snapshot!(autobatch_from(true, None, [doc_imp(UpdateDocuments, true, None), idx_del()]), @"Some((IndexDeletion { ids: [0, 1] }, true))");
|
||||||
debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, false, None), idx_del()]), @"Some((IndexDeletion { ids: [0, 1] }, false, Some(IndexDeletion { id: 1 })))");
|
debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, false, None), idx_del()]), @"Some((IndexDeletion { ids: [0, 1] }, false))");
|
||||||
debug_snapshot!(autobatch_from(true, None, [doc_imp(UpdateDocuments, false, None), idx_del()]), @"Some((IndexDeletion { ids: [0, 1] }, false, Some(IndexDeletion { id: 1 })))");
|
debug_snapshot!(autobatch_from(true, None, [doc_imp(UpdateDocuments, false, None), idx_del()]), @"Some((IndexDeletion { ids: [0, 1] }, false))");
|
||||||
debug_snapshot!(autobatch_from(true, None, [doc_del(), idx_del()]), @"Some((IndexDeletion { ids: [0, 1] }, false, Some(IndexDeletion { id: 1 })))");
|
debug_snapshot!(autobatch_from(true, None, [doc_del(), idx_del()]), @"Some((IndexDeletion { ids: [0, 1] }, false))");
|
||||||
debug_snapshot!(autobatch_from(true, None, [doc_del_fil(), idx_del()]), @"Some((IndexDeletion { ids: [0, 1] }, false, Some(IndexDeletion { id: 1 })))");
|
debug_snapshot!(autobatch_from(true, None, [doc_del_fil(), idx_del()]), @"Some((IndexDeletion { ids: [0, 1] }, false))");
|
||||||
debug_snapshot!(autobatch_from(true, None, [doc_clr(), idx_del()]), @"Some((IndexDeletion { ids: [0, 1] }, false, Some(IndexDeletion { id: 1 })))");
|
debug_snapshot!(autobatch_from(true, None, [doc_clr(), idx_del()]), @"Some((IndexDeletion { ids: [0, 1] }, false))");
|
||||||
debug_snapshot!(autobatch_from(true, None, [settings(true), idx_del()]), @"Some((IndexDeletion { ids: [0, 1] }, true, Some(IndexDeletion { id: 1 })))");
|
debug_snapshot!(autobatch_from(true, None, [settings(true), idx_del()]), @"Some((IndexDeletion { ids: [0, 1] }, true))");
|
||||||
debug_snapshot!(autobatch_from(true, None, [settings(false), idx_del()]), @"Some((IndexDeletion { ids: [0, 1] }, false, Some(IndexDeletion { id: 1 })))");
|
debug_snapshot!(autobatch_from(true, None, [settings(false), idx_del()]), @"Some((IndexDeletion { ids: [0, 1] }, false))");
|
||||||
|
|
||||||
debug_snapshot!(autobatch_from(false,None, [doc_imp(ReplaceDocuments, true, None), idx_del()]), @"Some((IndexDeletion { ids: [0, 1] }, true, Some(IndexDeletion { id: 1 })))");
|
debug_snapshot!(autobatch_from(false,None, [doc_imp(ReplaceDocuments, true, None), idx_del()]), @"Some((IndexDeletion { ids: [0, 1] }, true))");
|
||||||
debug_snapshot!(autobatch_from(false,None, [doc_imp(UpdateDocuments, true, None), idx_del()]), @"Some((IndexDeletion { ids: [0, 1] }, true, Some(IndexDeletion { id: 1 })))");
|
debug_snapshot!(autobatch_from(false,None, [doc_imp(UpdateDocuments, true, None), idx_del()]), @"Some((IndexDeletion { ids: [0, 1] }, true))");
|
||||||
debug_snapshot!(autobatch_from(false,None, [doc_imp(ReplaceDocuments, false, None), idx_del()]), @"Some((IndexDeletion { ids: [0, 1] }, false, Some(IndexDeletion { id: 1 })))");
|
debug_snapshot!(autobatch_from(false,None, [doc_imp(ReplaceDocuments, false, None), idx_del()]), @"Some((IndexDeletion { ids: [0, 1] }, false))");
|
||||||
debug_snapshot!(autobatch_from(false,None, [doc_imp(UpdateDocuments, false, None), idx_del()]), @"Some((IndexDeletion { ids: [0, 1] }, false, Some(IndexDeletion { id: 1 })))");
|
debug_snapshot!(autobatch_from(false,None, [doc_imp(UpdateDocuments, false, None), idx_del()]), @"Some((IndexDeletion { ids: [0, 1] }, false))");
|
||||||
debug_snapshot!(autobatch_from(false,None, [doc_del(), idx_del()]), @"Some((IndexDeletion { ids: [0, 1] }, false, Some(IndexDeletion { id: 1 })))");
|
debug_snapshot!(autobatch_from(false,None, [doc_del(), idx_del()]), @"Some((IndexDeletion { ids: [0, 1] }, false))");
|
||||||
debug_snapshot!(autobatch_from(false,None, [doc_del_fil(), idx_del()]), @"Some((IndexDeletion { ids: [0, 1] }, false, Some(IndexDeletion { id: 1 })))");
|
debug_snapshot!(autobatch_from(false,None, [doc_del_fil(), idx_del()]), @"Some((IndexDeletion { ids: [0, 1] }, false))");
|
||||||
debug_snapshot!(autobatch_from(false,None, [doc_clr(), idx_del()]), @"Some((IndexDeletion { ids: [0, 1] }, false, Some(IndexDeletion { id: 1 })))");
|
debug_snapshot!(autobatch_from(false,None, [doc_clr(), idx_del()]), @"Some((IndexDeletion { ids: [0, 1] }, false))");
|
||||||
debug_snapshot!(autobatch_from(false,None, [settings(true), idx_del()]), @"Some((IndexDeletion { ids: [0, 1] }, true, Some(IndexDeletion { id: 1 })))");
|
debug_snapshot!(autobatch_from(false,None, [settings(true), idx_del()]), @"Some((IndexDeletion { ids: [0, 1] }, true))");
|
||||||
debug_snapshot!(autobatch_from(false,None, [settings(false), idx_del()]), @"Some((IndexDeletion { ids: [0, 1] }, false, Some(IndexDeletion { id: 1 })))");
|
debug_snapshot!(autobatch_from(false,None, [settings(false), idx_del()]), @"Some((IndexDeletion { ids: [0, 1] }, false))");
|
||||||
}

#[test]
fn allowed_and_disallowed_index_creation() {
// `DocumentImport` can't be mixed with those disallowed to do so except if the index already exists.
debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, false, None), doc_imp(ReplaceDocuments, true, None)]), @"Some((DocumentOperation { allow_index_creation: false, primary_key: None, operation_ids: [0, 1] }, false, None))");
|
debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, false, None), doc_imp(ReplaceDocuments, true, None)]), @"Some((DocumentOperation { allow_index_creation: false, primary_key: None, operation_ids: [0, 1] }, false))");
|
||||||
debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, true, None), doc_imp(ReplaceDocuments, true, None)]), @"Some((DocumentOperation { allow_index_creation: true, primary_key: None, operation_ids: [0, 1] }, true, None))");
|
debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, true, None), doc_imp(ReplaceDocuments, true, None)]), @"Some((DocumentOperation { allow_index_creation: true, primary_key: None, operation_ids: [0, 1] }, true))");
|
||||||
debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, false, None), doc_imp(ReplaceDocuments, false, None)]), @"Some((DocumentOperation { allow_index_creation: false, primary_key: None, operation_ids: [0, 1] }, false, None))");
|
debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, false, None), doc_imp(ReplaceDocuments, false, None)]), @"Some((DocumentOperation { allow_index_creation: false, primary_key: None, operation_ids: [0, 1] }, false))");
|
||||||
debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, true, None), settings(true)]), @"Some((DocumentOperation { allow_index_creation: true, primary_key: None, operation_ids: [0] }, true, Some(DocumentOperationWithSettings { id: 1 })))");
|
debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, true, None), settings(true)]), @"Some((DocumentOperation { allow_index_creation: true, primary_key: None, operation_ids: [0] }, true))");
|
||||||
debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, false, None), settings(true)]), @"Some((DocumentOperation { allow_index_creation: false, primary_key: None, operation_ids: [0] }, false, Some(DocumentOperationWithSettings { id: 1 })))");
|
debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, false, None), settings(true)]), @"Some((DocumentOperation { allow_index_creation: false, primary_key: None, operation_ids: [0] }, false))");
|
||||||
|
|
||||||
debug_snapshot!(autobatch_from(false,None, [doc_imp(ReplaceDocuments, false, None), doc_imp(ReplaceDocuments, true, None)]), @"Some((DocumentOperation { allow_index_creation: false, primary_key: None, operation_ids: [0] }, false, Some(IndexCreationMismatch { id: 1 })))");
|
debug_snapshot!(autobatch_from(false,None, [doc_imp(ReplaceDocuments, false, None), doc_imp(ReplaceDocuments, true, None)]), @"Some((DocumentOperation { allow_index_creation: false, primary_key: None, operation_ids: [0] }, false))");
|
||||||
debug_snapshot!(autobatch_from(false,None, [doc_imp(ReplaceDocuments, true, None), doc_imp(ReplaceDocuments, true, None)]), @"Some((DocumentOperation { allow_index_creation: true, primary_key: None, operation_ids: [0, 1] }, true, None))");
|
debug_snapshot!(autobatch_from(false,None, [doc_imp(ReplaceDocuments, true, None), doc_imp(ReplaceDocuments, true, None)]), @"Some((DocumentOperation { allow_index_creation: true, primary_key: None, operation_ids: [0, 1] }, true))");
|
||||||
debug_snapshot!(autobatch_from(false,None, [doc_imp(ReplaceDocuments, false, None), doc_imp(ReplaceDocuments, false, None)]), @"Some((DocumentOperation { allow_index_creation: false, primary_key: None, operation_ids: [0, 1] }, false, None))");
|
debug_snapshot!(autobatch_from(false,None, [doc_imp(ReplaceDocuments, false, None), doc_imp(ReplaceDocuments, false, None)]), @"Some((DocumentOperation { allow_index_creation: false, primary_key: None, operation_ids: [0, 1] }, false))");
|
||||||
debug_snapshot!(autobatch_from(false,None, [doc_imp(ReplaceDocuments, true, None), settings(true)]), @"Some((DocumentOperation { allow_index_creation: true, primary_key: None, operation_ids: [0] }, true, Some(DocumentOperationWithSettings { id: 1 })))");
|
debug_snapshot!(autobatch_from(false,None, [doc_imp(ReplaceDocuments, true, None), settings(true)]), @"Some((DocumentOperation { allow_index_creation: true, primary_key: None, operation_ids: [0] }, true))");
|
||||||
debug_snapshot!(autobatch_from(false,None, [doc_imp(ReplaceDocuments, false, None), settings(true)]), @"Some((DocumentOperation { allow_index_creation: false, primary_key: None, operation_ids: [0] }, false, Some(IndexCreationMismatch { id: 1 })))");
|
debug_snapshot!(autobatch_from(false,None, [doc_imp(ReplaceDocuments, false, None), settings(true)]), @"Some((DocumentOperation { allow_index_creation: false, primary_key: None, operation_ids: [0] }, false))");
|
||||||
|
|
||||||
// batch deletion and addition
|
// batch deletion and addition
|
||||||
debug_snapshot!(autobatch_from(false, None, [doc_del(), doc_imp(ReplaceDocuments, true, Some("catto"))]), @"Some((DocumentDeletion { deletion_ids: [0], includes_by_filter: false }, false, Some(IndexCreationMismatch { id: 1 })))");
|
debug_snapshot!(autobatch_from(false, None, [doc_del(), doc_imp(ReplaceDocuments, true, Some("catto"))]), @"Some((DocumentDeletion { deletion_ids: [0], includes_by_filter: false }, false))");
|
||||||
debug_snapshot!(autobatch_from(false, None, [doc_del(), doc_imp(UpdateDocuments, true, Some("catto"))]), @"Some((DocumentDeletion { deletion_ids: [0], includes_by_filter: false }, false, Some(IndexCreationMismatch { id: 1 })))");
|
debug_snapshot!(autobatch_from(false, None, [doc_del(), doc_imp(UpdateDocuments, true, Some("catto"))]), @"Some((DocumentDeletion { deletion_ids: [0], includes_by_filter: false }, false))");
|
||||||
debug_snapshot!(autobatch_from(false, None, [doc_del(), doc_imp(ReplaceDocuments, true, None)]), @"Some((DocumentDeletion { deletion_ids: [0], includes_by_filter: false }, false, Some(IndexCreationMismatch { id: 1 })))");
|
debug_snapshot!(autobatch_from(false, None, [doc_del(), doc_imp(ReplaceDocuments, true, None)]), @"Some((DocumentDeletion { deletion_ids: [0], includes_by_filter: false }, false))");
|
||||||
debug_snapshot!(autobatch_from(false, None, [doc_del(), doc_imp(UpdateDocuments, true, None)]), @"Some((DocumentDeletion { deletion_ids: [0], includes_by_filter: false }, false, Some(IndexCreationMismatch { id: 1 })))");
|
debug_snapshot!(autobatch_from(false, None, [doc_del(), doc_imp(UpdateDocuments, true, None)]), @"Some((DocumentDeletion { deletion_ids: [0], includes_by_filter: false }, false))");
|
||||||
}

#[test]
fn autobatch_primary_key() {
// ==> If I have a pk
// With a single update
debug_snapshot!(autobatch_from(true, Some("id"), [doc_imp(ReplaceDocuments, true, None)]), @"Some((DocumentOperation { allow_index_creation: true, primary_key: None, operation_ids: [0] }, true, None))");
|
debug_snapshot!(autobatch_from(true, Some("id"), [doc_imp(ReplaceDocuments, true, None)]), @"Some((DocumentOperation { allow_index_creation: true, primary_key: None, operation_ids: [0] }, true))");
|
||||||
debug_snapshot!(autobatch_from(true, Some("id"), [doc_imp(ReplaceDocuments, true, Some("id"))]), @r###"Some((DocumentOperation { allow_index_creation: true, primary_key: Some("id"), operation_ids: [0] }, true, None))"###);
|
debug_snapshot!(autobatch_from(true, Some("id"), [doc_imp(ReplaceDocuments, true, Some("id"))]), @r###"Some((DocumentOperation { allow_index_creation: true, primary_key: Some("id"), operation_ids: [0] }, true))"###);
|
||||||
debug_snapshot!(autobatch_from(true, Some("id"), [doc_imp(ReplaceDocuments, true, Some("other"))]), @r###"Some((DocumentOperation { allow_index_creation: true, primary_key: Some("other"), operation_ids: [0] }, true, Some(PrimaryKeyIndexMismatch { id: 0, in_index: "id", in_task: "other" })))"###);
|
debug_snapshot!(autobatch_from(true, Some("id"), [doc_imp(ReplaceDocuments, true, Some("other"))]), @r###"Some((DocumentOperation { allow_index_creation: true, primary_key: Some("other"), operation_ids: [0] }, true))"###);
|
||||||
|
|
||||||
// With a multiple updates
|
// With a multiple updates
|
||||||
debug_snapshot!(autobatch_from(true, Some("id"), [doc_imp(ReplaceDocuments, true, None), doc_imp(ReplaceDocuments, true, None)]), @"Some((DocumentOperation { allow_index_creation: true, primary_key: None, operation_ids: [0, 1] }, true, None))");
|
debug_snapshot!(autobatch_from(true, Some("id"), [doc_imp(ReplaceDocuments, true, None), doc_imp(ReplaceDocuments, true, None)]), @"Some((DocumentOperation { allow_index_creation: true, primary_key: None, operation_ids: [0, 1] }, true))");
|
||||||
debug_snapshot!(autobatch_from(true, Some("id"), [doc_imp(ReplaceDocuments, true, None), doc_imp(ReplaceDocuments, true, Some("id"))]), @r###"Some((DocumentOperation { allow_index_creation: true, primary_key: Some("id"), operation_ids: [0, 1] }, true, None))"###);
|
debug_snapshot!(autobatch_from(true, Some("id"), [doc_imp(ReplaceDocuments, true, None), doc_imp(ReplaceDocuments, true, Some("id"))]), @r###"Some((DocumentOperation { allow_index_creation: true, primary_key: Some("id"), operation_ids: [0, 1] }, true))"###);
|
||||||
debug_snapshot!(autobatch_from(true, Some("id"), [doc_imp(ReplaceDocuments, true, None), doc_imp(ReplaceDocuments, true, Some("id")), doc_imp(ReplaceDocuments, true, None)]), @r###"Some((DocumentOperation { allow_index_creation: true, primary_key: Some("id"), operation_ids: [0, 1, 2] }, true, None))"###);
|
debug_snapshot!(autobatch_from(true, Some("id"), [doc_imp(ReplaceDocuments, true, None), doc_imp(ReplaceDocuments, true, Some("id")), doc_imp(ReplaceDocuments, true, None)]), @r###"Some((DocumentOperation { allow_index_creation: true, primary_key: Some("id"), operation_ids: [0, 1] }, true))"###);
|
||||||
debug_snapshot!(autobatch_from(true, Some("id"), [doc_imp(ReplaceDocuments, true, None), doc_imp(ReplaceDocuments, true, Some("other"))]), @r###"Some((DocumentOperation { allow_index_creation: true, primary_key: None, operation_ids: [0] }, true, Some(PrimaryKeyMismatch { id: 1, reason: TaskPrimaryKeyDifferFromIndexPrimaryKey { task_pk: "other", index_pk: "id" } })))"###);
|
debug_snapshot!(autobatch_from(true, Some("id"), [doc_imp(ReplaceDocuments, true, None), doc_imp(ReplaceDocuments, true, Some("other"))]), @"Some((DocumentOperation { allow_index_creation: true, primary_key: None, operation_ids: [0] }, true))");
|
||||||
debug_snapshot!(autobatch_from(true, Some("id"), [doc_imp(ReplaceDocuments, true, None), doc_imp(ReplaceDocuments, true, Some("other")), doc_imp(ReplaceDocuments, true, None)]), @r###"Some((DocumentOperation { allow_index_creation: true, primary_key: None, operation_ids: [0] }, true, Some(PrimaryKeyMismatch { id: 1, reason: TaskPrimaryKeyDifferFromIndexPrimaryKey { task_pk: "other", index_pk: "id" } })))"###);
|
debug_snapshot!(autobatch_from(true, Some("id"), [doc_imp(ReplaceDocuments, true, None), doc_imp(ReplaceDocuments, true, Some("other")), doc_imp(ReplaceDocuments, true, None)]), @"Some((DocumentOperation { allow_index_creation: true, primary_key: None, operation_ids: [0] }, true))");
|
||||||
debug_snapshot!(autobatch_from(true, Some("id"), [doc_imp(ReplaceDocuments, true, None), doc_imp(ReplaceDocuments, true, Some("other")), doc_imp(ReplaceDocuments, true, Some("id"))]), @r###"Some((DocumentOperation { allow_index_creation: true, primary_key: None, operation_ids: [0] }, true, Some(PrimaryKeyMismatch { id: 1, reason: TaskPrimaryKeyDifferFromIndexPrimaryKey { task_pk: "other", index_pk: "id" } })))"###);
|
debug_snapshot!(autobatch_from(true, Some("id"), [doc_imp(ReplaceDocuments, true, None), doc_imp(ReplaceDocuments, true, Some("other")), doc_imp(ReplaceDocuments, true, Some("id"))]), @"Some((DocumentOperation { allow_index_creation: true, primary_key: None, operation_ids: [0] }, true))");
|
||||||
|
|
||||||
debug_snapshot!(autobatch_from(true, Some("id"), [doc_imp(ReplaceDocuments, true, Some("id")), doc_imp(ReplaceDocuments, true, None)]), @r###"Some((DocumentOperation { allow_index_creation: true, primary_key: Some("id"), operation_ids: [0, 1] }, true, None))"###);
|
debug_snapshot!(autobatch_from(true, Some("id"), [doc_imp(ReplaceDocuments, true, Some("id")), doc_imp(ReplaceDocuments, true, None)]), @r###"Some((DocumentOperation { allow_index_creation: true, primary_key: Some("id"), operation_ids: [0] }, true))"###);
|
||||||
debug_snapshot!(autobatch_from(true, Some("id"), [doc_imp(ReplaceDocuments, true, Some("id")), doc_imp(ReplaceDocuments, true, Some("id"))]), @r###"Some((DocumentOperation { allow_index_creation: true, primary_key: Some("id"), operation_ids: [0, 1] }, true, None))"###);
|
debug_snapshot!(autobatch_from(true, Some("id"), [doc_imp(ReplaceDocuments, true, Some("id")), doc_imp(ReplaceDocuments, true, Some("id"))]), @r###"Some((DocumentOperation { allow_index_creation: true, primary_key: Some("id"), operation_ids: [0, 1] }, true))"###);
|
||||||
debug_snapshot!(autobatch_from(true, Some("id"), [doc_imp(ReplaceDocuments, true, Some("id")), doc_imp(ReplaceDocuments, true, Some("id")), doc_imp(ReplaceDocuments, true, None)]), @r###"Some((DocumentOperation { allow_index_creation: true, primary_key: Some("id"), operation_ids: [0, 1, 2] }, true, None))"###);
|
debug_snapshot!(autobatch_from(true, Some("id"), [doc_imp(ReplaceDocuments, true, Some("id")), doc_imp(ReplaceDocuments, true, Some("id")), doc_imp(ReplaceDocuments, true, None)]), @r###"Some((DocumentOperation { allow_index_creation: true, primary_key: Some("id"), operation_ids: [0, 1] }, true))"###);
|
||||||
debug_snapshot!(autobatch_from(true, Some("id"), [doc_imp(ReplaceDocuments, true, Some("id")), doc_imp(ReplaceDocuments, true, Some("other"))]), @r###"Some((DocumentOperation { allow_index_creation: true, primary_key: Some("id"), operation_ids: [0] }, true, Some(PrimaryKeyMismatch { id: 1, reason: TaskPrimaryKeyDifferFromIndexPrimaryKey { task_pk: "other", index_pk: "id" } })))"###);
|
debug_snapshot!(autobatch_from(true, Some("id"), [doc_imp(ReplaceDocuments, true, Some("id")), doc_imp(ReplaceDocuments, true, Some("other"))]), @r###"Some((DocumentOperation { allow_index_creation: true, primary_key: Some("id"), operation_ids: [0] }, true))"###);
|
||||||
debug_snapshot!(autobatch_from(true, Some("id"), [doc_imp(ReplaceDocuments, true, Some("id")), doc_imp(ReplaceDocuments, true, Some("other")), doc_imp(ReplaceDocuments, true, None)]), @r###"Some((DocumentOperation { allow_index_creation: true, primary_key: Some("id"), operation_ids: [0] }, true, Some(PrimaryKeyMismatch { id: 1, reason: TaskPrimaryKeyDifferFromIndexPrimaryKey { task_pk: "other", index_pk: "id" } })))"###);
|
debug_snapshot!(autobatch_from(true, Some("id"), [doc_imp(ReplaceDocuments, true, Some("id")), doc_imp(ReplaceDocuments, true, Some("other")), doc_imp(ReplaceDocuments, true, None)]), @r###"Some((DocumentOperation { allow_index_creation: true, primary_key: Some("id"), operation_ids: [0] }, true))"###);
|
||||||
debug_snapshot!(autobatch_from(true, Some("id"), [doc_imp(ReplaceDocuments, true, Some("id")), doc_imp(ReplaceDocuments, true, Some("other")), doc_imp(ReplaceDocuments, true, Some("id"))]), @r###"Some((DocumentOperation { allow_index_creation: true, primary_key: Some("id"), operation_ids: [0] }, true, Some(PrimaryKeyMismatch { id: 1, reason: TaskPrimaryKeyDifferFromIndexPrimaryKey { task_pk: "other", index_pk: "id" } })))"###);
|
debug_snapshot!(autobatch_from(true, Some("id"), [doc_imp(ReplaceDocuments, true, Some("id")), doc_imp(ReplaceDocuments, true, Some("other")), doc_imp(ReplaceDocuments, true, Some("id"))]), @r###"Some((DocumentOperation { allow_index_creation: true, primary_key: Some("id"), operation_ids: [0] }, true))"###);
|
||||||
|
|
||||||
debug_snapshot!(autobatch_from(true, Some("id"), [doc_imp(ReplaceDocuments, true, Some("other")), doc_imp(ReplaceDocuments, true, None)]), @r###"Some((DocumentOperation { allow_index_creation: true, primary_key: Some("other"), operation_ids: [0] }, true, Some(PrimaryKeyIndexMismatch { id: 0, in_index: "id", in_task: "other" })))"###);
|
debug_snapshot!(autobatch_from(true, Some("id"), [doc_imp(ReplaceDocuments, true, Some("other")), doc_imp(ReplaceDocuments, true, None)]), @r###"Some((DocumentOperation { allow_index_creation: true, primary_key: Some("other"), operation_ids: [0] }, true))"###);
|
||||||
debug_snapshot!(autobatch_from(true, Some("id"), [doc_imp(ReplaceDocuments, true, Some("other")), doc_imp(ReplaceDocuments, true, Some("id"))]), @r###"Some((DocumentOperation { allow_index_creation: true, primary_key: Some("other"), operation_ids: [0] }, true, Some(PrimaryKeyIndexMismatch { id: 0, in_index: "id", in_task: "other" })))"###);
|
debug_snapshot!(autobatch_from(true, Some("id"), [doc_imp(ReplaceDocuments, true, Some("other")), doc_imp(ReplaceDocuments, true, Some("id"))]), @r###"Some((DocumentOperation { allow_index_creation: true, primary_key: Some("other"), operation_ids: [0] }, true))"###);
|
||||||
debug_snapshot!(autobatch_from(true, Some("id"), [doc_imp(ReplaceDocuments, true, Some("other")), doc_imp(ReplaceDocuments, true, Some("id")), doc_imp(ReplaceDocuments, true, None)]), @r###"Some((DocumentOperation { allow_index_creation: true, primary_key: Some("other"), operation_ids: [0] }, true, Some(PrimaryKeyIndexMismatch { id: 0, in_index: "id", in_task: "other" })))"###);
|
debug_snapshot!(autobatch_from(true, Some("id"), [doc_imp(ReplaceDocuments, true, Some("other")), doc_imp(ReplaceDocuments, true, Some("id")), doc_imp(ReplaceDocuments, true, None)]), @r###"Some((DocumentOperation { allow_index_creation: true, primary_key: Some("other"), operation_ids: [0] }, true))"###);
|
||||||
debug_snapshot!(autobatch_from(true, Some("id"), [doc_imp(ReplaceDocuments, true, Some("other")), doc_imp(ReplaceDocuments, true, Some("other"))]), @r###"Some((DocumentOperation { allow_index_creation: true, primary_key: Some("other"), operation_ids: [0] }, true, Some(PrimaryKeyIndexMismatch { id: 0, in_index: "id", in_task: "other" })))"###);
|
debug_snapshot!(autobatch_from(true, Some("id"), [doc_imp(ReplaceDocuments, true, Some("other")), doc_imp(ReplaceDocuments, true, Some("other"))]), @r###"Some((DocumentOperation { allow_index_creation: true, primary_key: Some("other"), operation_ids: [0] }, true))"###);
|
||||||
debug_snapshot!(autobatch_from(true, Some("id"), [doc_imp(ReplaceDocuments, true, Some("other")), doc_imp(ReplaceDocuments, true, Some("other")), doc_imp(ReplaceDocuments, true, None)]), @r###"Some((DocumentOperation { allow_index_creation: true, primary_key: Some("other"), operation_ids: [0] }, true, Some(PrimaryKeyIndexMismatch { id: 0, in_index: "id", in_task: "other" })))"###);
|
debug_snapshot!(autobatch_from(true, Some("id"), [doc_imp(ReplaceDocuments, true, Some("other")), doc_imp(ReplaceDocuments, true, Some("other")), doc_imp(ReplaceDocuments, true, None)]), @r###"Some((DocumentOperation { allow_index_creation: true, primary_key: Some("other"), operation_ids: [0] }, true))"###);
|
||||||
debug_snapshot!(autobatch_from(true, Some("id"), [doc_imp(ReplaceDocuments, true, Some("other")), doc_imp(ReplaceDocuments, true, Some("other")), doc_imp(ReplaceDocuments, true, Some("id"))]), @r###"Some((DocumentOperation { allow_index_creation: true, primary_key: Some("other"), operation_ids: [0] }, true, Some(PrimaryKeyIndexMismatch { id: 0, in_index: "id", in_task: "other" })))"###);
|
debug_snapshot!(autobatch_from(true, Some("id"), [doc_imp(ReplaceDocuments, true, Some("other")), doc_imp(ReplaceDocuments, true, Some("other")), doc_imp(ReplaceDocuments, true, Some("id"))]), @r###"Some((DocumentOperation { allow_index_creation: true, primary_key: Some("other"), operation_ids: [0] }, true))"###);
|
||||||
|
|
||||||
// ==> If I don't have a pk
|
// ==> If I don't have a pk
|
||||||
// With a single update
|
// With a single update
|
||||||
debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, true, None)]), @"Some((DocumentOperation { allow_index_creation: true, primary_key: None, operation_ids: [0] }, true, None))");
|
debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, true, None)]), @"Some((DocumentOperation { allow_index_creation: true, primary_key: None, operation_ids: [0] }, true))");
|
||||||
debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, true, Some("id"))]), @r###"Some((DocumentOperation { allow_index_creation: true, primary_key: Some("id"), operation_ids: [0] }, true, None))"###);
|
debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, true, Some("id"))]), @r###"Some((DocumentOperation { allow_index_creation: true, primary_key: Some("id"), operation_ids: [0] }, true))"###);
|
||||||
debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, true, Some("other"))]), @r###"Some((DocumentOperation { allow_index_creation: true, primary_key: Some("other"), operation_ids: [0] }, true, None))"###);
|
debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, true, Some("other"))]), @r###"Some((DocumentOperation { allow_index_creation: true, primary_key: Some("other"), operation_ids: [0] }, true))"###);
|
||||||
|
|
||||||
// With a multiple updates
|
// With a multiple updates
|
||||||
debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, true, None), doc_imp(ReplaceDocuments, true, None)]), @"Some((DocumentOperation { allow_index_creation: true, primary_key: None, operation_ids: [0, 1] }, true, None))");
|
debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, true, None), doc_imp(ReplaceDocuments, true, None)]), @"Some((DocumentOperation { allow_index_creation: true, primary_key: None, operation_ids: [0, 1] }, true))");
|
||||||
debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, true, None), doc_imp(ReplaceDocuments, true, Some("id"))]), @r###"Some((DocumentOperation { allow_index_creation: true, primary_key: None, operation_ids: [0] }, true, Some(PrimaryKeyMismatch { id: 1, reason: CannotInterfereWithPrimaryKeyGuessing { task_pk: "id" } })))"###);
|
debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, true, None), doc_imp(ReplaceDocuments, true, Some("id"))]), @"Some((DocumentOperation { allow_index_creation: true, primary_key: None, operation_ids: [0] }, true))");
|
||||||
debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, true, Some("id")), doc_imp(ReplaceDocuments, true, None)]), @r###"Some((DocumentOperation { allow_index_creation: true, primary_key: Some("id"), operation_ids: [0, 1] }, true, None))"###);
|
debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, true, Some("id")), doc_imp(ReplaceDocuments, true, None)]), @r###"Some((DocumentOperation { allow_index_creation: true, primary_key: Some("id"), operation_ids: [0] }, true))"###);
|
||||||
}
@@ -3,7 +3,7 @@ use std::fmt;
 use meilisearch_types::heed::RoTxn;
 use meilisearch_types::milli::update::IndexDocumentsMethod;
 use meilisearch_types::settings::{Settings, Unchecked};
-use meilisearch_types::tasks::{BatchStopReason, Kind, KindWithContent, Status, Task};
+use meilisearch_types::tasks::{Kind, KindWithContent, Status, Task};
 use roaring::RoaringBitmap;
 use uuid::Uuid;
@@ -423,8 +423,7 @@ impl IndexScheduler {
     }

     /// Create the next batch to be processed;
-    /// 0. We get the *last* task to cancel.
-    /// 1. We get the tasks to upgrade.
+    /// 1. We get the *last* task to cancel.
     /// 2. We get the *next* task to delete.
     /// 3. We get the *next* snapshot to process.
     /// 4. We get the *next* dump to process.
@@ -441,23 +440,9 @@ impl IndexScheduler {
         let mut current_batch = ProcessingBatch::new(batch_id);

         let enqueued = &self.queue.tasks.get_status(rtxn, Status::Enqueued)?;
-        let count_total_enqueued = enqueued.len();
         let failed = &self.queue.tasks.get_status(rtxn, Status::Failed)?;

-        // 0. we get the last task to cancel.
-        let to_cancel = self.queue.tasks.get_kind(rtxn, Kind::TaskCancelation)? & enqueued;
-        if let Some(task_id) = to_cancel.max() {
-            let mut task =
-                self.queue.tasks.get_task(rtxn, task_id)?.ok_or(Error::CorruptedTaskQueue)?;
-            current_batch.processing(Some(&mut task));
-            current_batch.reason(BatchStopReason::TaskCannotBeBatched {
-                kind: Kind::TaskCancelation,
-                id: task_id,
-            });
-            return Ok(Some((Batch::TaskCancelation { task }, current_batch)));
-        }
-
-        // 1. We upgrade the instance
+        // 0. The priority over everything is to upgrade the instance
         // There shouldn't be multiple upgrade tasks but just in case we're going to batch all of them at the same time
         let upgrade = self.queue.tasks.get_kind(rtxn, Kind::UpgradeDatabase)? & (enqueued | failed);
         if !upgrade.is_empty() {
@@ -468,26 +453,16 @@ impl IndexScheduler {
|
|||||||
current_batch.uid = batch_uid;
|
current_batch.uid = batch_uid;
|
||||||
}
|
}
|
||||||
current_batch.processing(&mut tasks);
|
current_batch.processing(&mut tasks);
|
||||||
current_batch
|
|
||||||
.reason(BatchStopReason::TaskKindCannotBeBatched { kind: Kind::UpgradeDatabase });
|
|
||||||
return Ok(Some((Batch::UpgradeDatabase { tasks }, current_batch)));
|
return Ok(Some((Batch::UpgradeDatabase { tasks }, current_batch)));
|
||||||
}
|
}
|
||||||
|
|
||||||
// check the version of the scheduler here.
|
// 1. we get the last task to cancel.
|
||||||
// if the version is not the current, refuse to batch any additional task.
|
let to_cancel = self.queue.tasks.get_kind(rtxn, Kind::TaskCancelation)? & enqueued;
|
||||||
let version = self.version.get_version(rtxn)?;
|
if let Some(task_id) = to_cancel.max() {
|
||||||
let package_version = (
|
let mut task =
|
||||||
meilisearch_types::versioning::VERSION_MAJOR,
|
self.queue.tasks.get_task(rtxn, task_id)?.ok_or(Error::CorruptedTaskQueue)?;
|
||||||
meilisearch_types::versioning::VERSION_MINOR,
|
current_batch.processing(Some(&mut task));
|
||||||
meilisearch_types::versioning::VERSION_PATCH,
|
return Ok(Some((Batch::TaskCancelation { task }, current_batch)));
|
||||||
);
|
|
||||||
if version != Some(package_version) {
|
|
||||||
return Err(Error::UnrecoverableError(Box::new(
|
|
||||||
Error::IndexSchedulerVersionMismatch {
|
|
||||||
index_scheduler_version: version.unwrap_or((1, 12, 0)),
|
|
||||||
package_version,
|
|
||||||
},
|
|
||||||
)));
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// 2. we get the next task to delete
|
// 2. we get the next task to delete
|
||||||
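The last hunk above also shows the `-` side refusing to batch anything when the persisted index-scheduler version differs from the binary's version. A hedged sketch of that guard in isolation; the names are simplified stand-ins for the real `Error` and versioning types:

```rust
// Hedged sketch of the version guard; simplified stand-in types.
type Version = (u32, u32, u32);

#[derive(Debug)]
enum SchedulerError {
    VersionMismatch { index_scheduler_version: Version, package_version: Version },
}

fn ensure_version_matches(stored: Option<Version>, package_version: Version) -> Result<(), SchedulerError> {
    // Databases created before the version was persisted default to 1.12.0,
    // mirroring the `unwrap_or((1, 12, 0))` fallback in the hunk above.
    let index_scheduler_version = stored.unwrap_or((1, 12, 0));
    if index_scheduler_version != package_version {
        return Err(SchedulerError::VersionMismatch { index_scheduler_version, package_version });
    }
    Ok(())
}

fn main() {
    assert!(ensure_version_matches(Some((1, 14, 0)), (1, 14, 0)).is_ok());
    assert!(ensure_version_matches(None, (1, 14, 0)).is_err());
}
```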
@@ -495,8 +470,6 @@ impl IndexScheduler {
         if !to_delete.is_empty() {
             let mut tasks = self.queue.tasks.get_existing_tasks(rtxn, to_delete)?;
             current_batch.processing(&mut tasks);
-            current_batch
-                .reason(BatchStopReason::TaskKindCannotBeBatched { kind: Kind::TaskDeletion });
             return Ok(Some((Batch::TaskDeletions(tasks), current_batch)));
         }

@@ -505,8 +478,6 @@ impl IndexScheduler {
         if !to_snapshot.is_empty() {
             let mut tasks = self.queue.tasks.get_existing_tasks(rtxn, to_snapshot)?;
             current_batch.processing(&mut tasks);
-            current_batch
-                .reason(BatchStopReason::TaskKindCannotBeBatched { kind: Kind::SnapshotCreation });
             return Ok(Some((Batch::SnapshotCreation(tasks), current_batch)));
         }

@@ -516,10 +487,6 @@ impl IndexScheduler {
             let mut task =
                 self.queue.tasks.get_task(rtxn, to_dump)?.ok_or(Error::CorruptedTaskQueue)?;
             current_batch.processing(Some(&mut task));
-            current_batch.reason(BatchStopReason::TaskCannotBeBatched {
-                kind: Kind::DumpCreation,
-                id: task.uid,
-            });
             return Ok(Some((Batch::Dump(task), current_batch)));
         }

@@ -537,10 +504,6 @@ impl IndexScheduler {
         } else {
             assert!(matches!(&task.kind, KindWithContent::IndexSwap { swaps } if swaps.is_empty()));
             current_batch.processing(Some(&mut task));
-            current_batch.reason(BatchStopReason::TaskCannotBeBatched {
-                kind: Kind::IndexSwap,
-                id: task.uid,
-            });
             return Ok(Some((Batch::IndexSwap { task }, current_batch)));
         };
@@ -562,14 +525,9 @@ impl IndexScheduler {
                 1
             };

-        let mut stop_reason = BatchStopReason::default();
         let mut enqueued = Vec::new();
         let mut total_size: u64 = 0;
-        for task_id in index_tasks.into_iter() {
-            if enqueued.len() >= tasks_limit {
-                stop_reason = BatchStopReason::ReachedTaskLimit { task_limit: tasks_limit };
-                break;
-            }
+        for task_id in index_tasks.into_iter().take(tasks_limit) {
             let task = self
                 .queue
                 .tasks

@@ -581,27 +539,16 @@ impl IndexScheduler {
                 total_size = total_size.saturating_add(content_size);
             }

-            let size_limit = self.scheduler.batched_tasks_size_limit;
-            if total_size > size_limit && !enqueued.is_empty() {
-                stop_reason = BatchStopReason::ReachedSizeLimit { size_limit, size: total_size };
+            if total_size > self.scheduler.batched_tasks_size_limit && !enqueued.is_empty() {
                 break;
             }

             enqueued.push((task.uid, task.kind));
         }

-        stop_reason.replace_unspecified({
-            if enqueued.len() == count_total_enqueued as usize {
-                BatchStopReason::ExhaustedEnqueuedTasks
-            } else {
-                BatchStopReason::ExhaustedEnqueuedTasksForIndex { index: index_name.to_owned() }
-            }
-        });
-
-        if let Some((batchkind, create_index, autobatch_stop_reason)) =
+        if let Some((batchkind, create_index)) =
             autobatcher::autobatch(enqueued, index_already_exists, primary_key.as_deref())
         {
-            current_batch.reason(autobatch_stop_reason.unwrap_or(stop_reason));
             return Ok(self
                 .create_next_batch_index(
                     rtxn,
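The two hunks above replace an explicit accumulation loop that records a stop reason with a plain `take(tasks_limit)`. A hedged sketch of the `-` side's loop logic with stand-in types, showing how the task limit, the size limit, and exhaustion each map to a different reason:

```rust
// Hedged sketch: simplified stand-ins, not the real BatchStopReason or queue types.
#[derive(Debug, Default, PartialEq)]
enum StopReason {
    #[default]
    Unspecified,
    ReachedTaskLimit { task_limit: usize },
    ReachedSizeLimit { size_limit: u64, size: u64 },
    ExhaustedEnqueuedTasks,
}

/// `tasks` is a list of (task uid, content file size) pairs.
fn gather(tasks: &[(u32, u64)], task_limit: usize, size_limit: u64) -> (Vec<u32>, StopReason) {
    let mut stop_reason = StopReason::Unspecified;
    let mut picked = Vec::new();
    let mut total_size = 0u64;
    for &(uid, size) in tasks {
        if picked.len() >= task_limit {
            stop_reason = StopReason::ReachedTaskLimit { task_limit };
            break;
        }
        total_size = total_size.saturating_add(size);
        if total_size > size_limit && !picked.is_empty() {
            stop_reason = StopReason::ReachedSizeLimit { size_limit, size: total_size };
            break;
        }
        picked.push(uid);
    }
    // If neither limit was hit, the batcher simply ran out of enqueued tasks.
    if stop_reason == StopReason::Unspecified {
        stop_reason = StopReason::ExhaustedEnqueuedTasks;
    }
    (picked, stop_reason)
}

fn main() {
    let tasks = [(0, 10), (1, 10), (2, 10)];
    println!("{:?}", gather(&tasks, 2, 1_000));
}
```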
@@ -20,12 +20,10 @@ use std::path::PathBuf;
 use std::sync::atomic::{AtomicBool, AtomicU32, Ordering};
 use std::sync::Arc;

-use convert_case::{Case, Casing as _};
 use meilisearch_types::error::ResponseError;
 use meilisearch_types::heed::{Env, WithoutTls};
 use meilisearch_types::milli;
 use meilisearch_types::tasks::Status;
-use process_batch::ProcessBatchInfo;
 use rayon::current_num_threads;
 use rayon::iter::{IntoParallelIterator, ParallelIterator};
 use roaring::RoaringBitmap;

@@ -225,16 +223,16 @@ impl IndexScheduler {
         let mut stop_scheduler_forever = false;
         let mut wtxn = self.env.write_txn().map_err(Error::HeedTransaction)?;
         let mut canceled = RoaringBitmap::new();
-        let mut process_batch_info = ProcessBatchInfo::default();
+        let mut congestion = None;

         match res {
-            Ok((tasks, info)) => {
+            Ok((tasks, cong)) => {
                 #[cfg(test)]
                 self.breakpoint(crate::test_utils::Breakpoint::ProcessBatchSucceeded);

                 let (task_progress, task_progress_obj) = AtomicTaskStep::new(tasks.len() as u32);
                 progress.update_progress(task_progress_obj);
-                process_batch_info = info;
+                congestion = cong;
                 let mut success = 0;
                 let mut failure = 0;
                 let mut canceled_by = None;

@@ -352,9 +350,6 @@ impl IndexScheduler {
         // We must re-add the canceled task so they're part of the same batch.
         ids |= canceled;

-        let ProcessBatchInfo { congestion, pre_commit_dabases_sizes, post_commit_dabases_sizes } =
-            process_batch_info;
-
         processing_batch.stats.progress_trace =
             progress.accumulated_durations().into_iter().map(|(k, v)| (k, v.into())).collect();
         processing_batch.stats.write_channel_congestion = congestion.map(|congestion| {
@@ -364,33 +359,6 @@ impl IndexScheduler {
             congestion_info.insert("blocking_ratio".into(), congestion.congestion_ratio().into());
             congestion_info
         });
-        processing_batch.stats.internal_database_sizes = pre_commit_dabases_sizes
-            .iter()
-            .flat_map(|(dbname, pre_size)| {
-                post_commit_dabases_sizes
-                    .get(dbname)
-                    .map(|post_size| {
-                        use byte_unit::{Byte, UnitType::Binary};
-                        use std::cmp::Ordering::{Equal, Greater, Less};
-
-                        let post = Byte::from_u64(*post_size as u64).get_appropriate_unit(Binary);
-                        let diff_size = post_size.abs_diff(*pre_size) as u64;
-                        let diff = Byte::from_u64(diff_size).get_appropriate_unit(Binary);
-                        let sign = match post_size.cmp(pre_size) {
-                            Equal => return None,
-                            Greater => "+",
-                            Less => "-",
-                        };
-
-                        Some((
-                            dbname.to_case(Case::Camel),
-                            format!("{post:#.2} ({sign}{diff:#.2})").into(),
-                        ))
-                    })
-                    .into_iter()
-                    .flatten()
-            })
-            .collect();
-
         if let Some(congestion) = congestion {
             tracing::debug!(
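The block removed above builds a human-readable per-database size report. A hedged sketch of the same computation as a free function; it assumes the `byte_unit` 5 and `indexmap` crates already used by the repository, and the example map contents are invented:

```rust
// Hedged sketch: same byte_unit calls as the hunk above, simplified key handling
// (no camelCase conversion) and invented example data.
use std::cmp::Ordering::{Equal, Greater, Less};

use byte_unit::{Byte, UnitType::Binary};
use indexmap::IndexMap;

fn size_report(
    pre: &IndexMap<&'static str, usize>,
    post: &IndexMap<&'static str, usize>,
) -> IndexMap<String, String> {
    pre.iter()
        .filter_map(|(dbname, pre_size)| {
            let post_size = post.get(dbname)?;
            let rendered = Byte::from_u64(*post_size as u64).get_appropriate_unit(Binary);
            let diff = Byte::from_u64(post_size.abs_diff(*pre_size) as u64).get_appropriate_unit(Binary);
            let sign = match post_size.cmp(pre_size) {
                Equal => return None, // unchanged databases are omitted from the stats
                Greater => "+",
                Less => "-",
            };
            Some((dbname.to_string(), format!("{rendered:#.2} ({sign}{diff:#.2})")))
        })
        .collect()
}

fn main() {
    let pre = IndexMap::from([("word_docids", 1_048_576usize)]);
    let post = IndexMap::from([("word_docids", 2_097_152usize)]);
    println!("{:?}", size_report(&pre, &post));
}
```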
@@ -6,14 +6,13 @@ use meilisearch_types::batches::{BatchEnqueuedAt, BatchId};
 use meilisearch_types::heed::{RoTxn, RwTxn};
 use meilisearch_types::milli::progress::{Progress, VariableNameStep};
 use meilisearch_types::milli::{self, ChannelCongestion};
-use meilisearch_types::tasks::{Details, IndexSwap, Kind, KindWithContent, Status, Task};
-use meilisearch_types::versioning::{VERSION_MAJOR, VERSION_MINOR, VERSION_PATCH};
+use meilisearch_types::tasks::{Details, IndexSwap, KindWithContent, Status, Task};
 use milli::update::Settings as MilliSettings;
 use roaring::RoaringBitmap;

 use super::create_batch::Batch;
 use crate::processing::{
-    AtomicBatchStep, AtomicTaskStep, CreateIndexProgress, DeleteIndexProgress, FinalizingIndexStep,
+    AtomicBatchStep, AtomicTaskStep, CreateIndexProgress, DeleteIndexProgress,
     InnerSwappingTwoIndexes, SwappingTheIndexes, TaskCancelationProgress, TaskDeletionProgress,
     UpdateIndexProgress,
 };

@@ -23,16 +22,6 @@ use crate::utils::{
 };
 use crate::{Error, IndexScheduler, Result, TaskId};

-#[derive(Debug, Default)]
-pub struct ProcessBatchInfo {
-    /// The write channel congestion. None when unavailable: settings update.
-    pub congestion: Option<ChannelCongestion>,
-    /// The sizes of the different databases before starting the indexation.
-    pub pre_commit_dabases_sizes: indexmap::IndexMap<&'static str, usize>,
-    /// The sizes of the different databases after commiting the indexation.
-    pub post_commit_dabases_sizes: indexmap::IndexMap<&'static str, usize>,
-}
-
 impl IndexScheduler {
     /// Apply the operation associated with the given batch.
     ///

@@ -46,7 +35,7 @@ impl IndexScheduler {
         batch: Batch,
         current_batch: &mut ProcessingBatch,
         progress: Progress,
-    ) -> Result<(Vec<Task>, ProcessBatchInfo)> {
+    ) -> Result<(Vec<Task>, Option<ChannelCongestion>)> {
         #[cfg(test)]
         {
             self.maybe_fail(crate::test_utils::FailureLocation::InsideProcessBatch)?;
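The struct removed in the second hunk above is what the `-` side threads through `process_batch` instead of a bare `Option<ChannelCongestion>`. A hedged, self-contained sketch of that shape (with `ChannelCongestion` stubbed out) and of how non-index batch arms return it:

```rust
// Hedged sketch: ChannelCongestion is a dummy stand-in so this compiles on its own.
#[derive(Debug, Default, Clone, Copy)]
struct ChannelCongestion;

#[derive(Debug, Default)]
struct ProcessBatchInfo {
    /// The write channel congestion. None when unavailable: settings update.
    congestion: Option<ChannelCongestion>,
    /// The sizes of the different databases before starting the indexation.
    pre_commit_dabases_sizes: indexmap::IndexMap<&'static str, usize>,
    /// The sizes of the different databases after committing the indexation.
    post_commit_dabases_sizes: indexmap::IndexMap<&'static str, usize>,
}

// Batch arms that never touch an index (cancelation, deletion, swaps, ...)
// simply return the default value instead of `None`.
fn non_index_arm(tasks: Vec<u32>) -> (Vec<u32>, ProcessBatchInfo) {
    (tasks, ProcessBatchInfo::default())
}

fn main() {
    let (tasks, info) = non_index_arm(vec![0, 1]);
    println!("{tasks:?} {info:?}");
}
```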
@@ -87,7 +76,7 @@ impl IndexScheduler {

                 canceled_tasks.push(task);

-                Ok((canceled_tasks, ProcessBatchInfo::default()))
+                Ok((canceled_tasks, None))
             }
             Batch::TaskDeletions(mut tasks) => {
                 // 1. Retrieve the tasks that matched the query at enqueue-time.

@@ -126,14 +115,14 @@ impl IndexScheduler {
                         _ => unreachable!(),
                     }
                 }
-                Ok((tasks, ProcessBatchInfo::default()))
+                Ok((tasks, None))
+            }
+            Batch::SnapshotCreation(tasks) => {
+                self.process_snapshot(progress, tasks).map(|tasks| (tasks, None))
+            }
+            Batch::Dump(task) => {
+                self.process_dump_creation(progress, task).map(|tasks| (tasks, None))
             }
-            Batch::SnapshotCreation(tasks) => self
-                .process_snapshot(progress, tasks)
-                .map(|tasks| (tasks, ProcessBatchInfo::default())),
-            Batch::Dump(task) => self
-                .process_dump_creation(progress, task)
-                .map(|tasks| (tasks, ProcessBatchInfo::default())),
             Batch::IndexOperation { op, must_create_index } => {
                 let index_uid = op.index_uid().to_string();
                 let index = if must_create_index {
@@ -145,28 +134,15 @@ impl IndexScheduler {
                     self.index_mapper.index(&rtxn, &index_uid)?
                 };

-                let mut index_wtxn = index.write_txn()?;
-
-                let index_version = index.get_version(&index_wtxn)?.unwrap_or((1, 12, 0));
-                let package_version = (VERSION_MAJOR, VERSION_MINOR, VERSION_PATCH);
-                if index_version != package_version {
-                    return Err(Error::IndexVersionMismatch {
-                        index: index_uid,
-                        index_version,
-                        package_version,
-                    });
-                }
-
                 // the index operation can take a long time, so save this handle to make it available to the search for the duration of the tick
                 self.index_mapper
                     .set_currently_updating_index(Some((index_uid.clone(), index.clone())));

-                let pre_commit_dabases_sizes = index.database_sizes(&index_wtxn)?;
+                let mut index_wtxn = index.write_txn()?;
                 let (tasks, congestion) =
-                    self.apply_index_operation(&mut index_wtxn, &index, op, &progress)?;
+                    self.apply_index_operation(&mut index_wtxn, &index, op, progress)?;

                 {
-                    progress.update_progress(FinalizingIndexStep::Committing);
                     let span = tracing::trace_span!(target: "indexing::scheduler", "commit");
                     let _entered = span.enter();

@@ -177,15 +153,12 @@ impl IndexScheduler {
                 // stats of the index. Since the tasks have already been processed and
                 // this is a non-critical operation. If it fails, we should not fail
                 // the entire batch.
-                let mut post_commit_dabases_sizes = None;
                 let res = || -> Result<()> {
-                    progress.update_progress(FinalizingIndexStep::ComputingStats);
                     let index_rtxn = index.read_txn()?;
                     let stats = crate::index_mapper::IndexStats::new(&index, &index_rtxn)
                         .map_err(|e| Error::from_milli(e, Some(index_uid.to_string())))?;
                     let mut wtxn = self.env.write_txn()?;
                     self.index_mapper.store_stats_of(&mut wtxn, &index_uid, &stats)?;
-                    post_commit_dabases_sizes = Some(index.database_sizes(&index_rtxn)?);
                     wtxn.commit()?;
                     Ok(())
                 }();
@@ -198,16 +171,7 @@ impl IndexScheduler {
                     ),
                 }

-                let info = ProcessBatchInfo {
-                    congestion,
-                    // In case we fail to the get post-commit sizes we decide
-                    // that nothing changed and use the pre-commit sizes.
-                    post_commit_dabases_sizes: post_commit_dabases_sizes
-                        .unwrap_or_else(|| pre_commit_dabases_sizes.clone()),
-                    pre_commit_dabases_sizes,
-                };
-
-                Ok((tasks, info))
+                Ok((tasks, congestion))
             }
             Batch::IndexCreation { index_uid, primary_key, task } => {
                 progress.update_progress(CreateIndexProgress::CreatingTheIndex);

@@ -275,7 +239,7 @@ impl IndexScheduler {
                     ),
                 }

-                Ok((vec![task], ProcessBatchInfo::default()))
+                Ok((vec![task], None))
             }
             Batch::IndexDeletion { index_uid, index_has_been_created, mut tasks } => {
                 progress.update_progress(DeleteIndexProgress::DeletingTheIndex);

@@ -309,9 +273,7 @@ impl IndexScheduler {
                     };
                 }

-                // Here we could also show that all the internal database sizes goes to 0
-                // but it would mean opening the index and that's costly.
-                Ok((tasks, ProcessBatchInfo::default()))
+                Ok((tasks, None))
             }
             Batch::IndexSwap { mut task } => {
                 progress.update_progress(SwappingTheIndexes::EnsuringCorrectnessOfTheSwap);
@@ -359,17 +321,15 @@ impl IndexScheduler {
                 }
                 wtxn.commit()?;
                 task.status = Status::Succeeded;
-                Ok((vec![task], ProcessBatchInfo::default()))
+                Ok((vec![task], None))
             }
             Batch::UpgradeDatabase { mut tasks } => {
                 let KindWithContent::UpgradeDatabase { from } = tasks.last().unwrap().kind else {
                     unreachable!();
                 };

                 let ret = catch_unwind(AssertUnwindSafe(|| self.process_upgrade(from, progress)));
                 match ret {
                     Ok(Ok(())) => (),
-                    Ok(Err(Error::AbortedTask)) => return Err(Error::AbortedTask),
                     Ok(Err(e)) => return Err(Error::DatabaseUpgrade(Box::new(e))),
                     Err(e) => {
                         let msg = match e.downcast_ref::<&'static str>() {

@@ -391,7 +351,7 @@ impl IndexScheduler {
                     task.error = None;
                 }

-                Ok((tasks, ProcessBatchInfo::default()))
+                Ok((tasks, None))
             }
         }
     }
@@ -667,79 +627,17 @@ impl IndexScheduler {
         progress: &Progress,
     ) -> Result<Vec<Task>> {
         progress.update_progress(TaskCancelationProgress::RetrievingTasks);
-        let mut tasks_to_cancel = RoaringBitmap::new();
-
-        let enqueued_tasks = &self.queue.tasks.get_status(rtxn, Status::Enqueued)?;
-
-        // 0. Check if any upgrade task was matched.
-        // If so, we cancel all the failed or enqueued upgrade tasks.
-        let upgrade_tasks = &self.queue.tasks.get_kind(rtxn, Kind::UpgradeDatabase)?;
-        let is_canceling_upgrade = !matched_tasks.is_disjoint(upgrade_tasks);
-        if is_canceling_upgrade {
-            let failed_tasks = self.queue.tasks.get_status(rtxn, Status::Failed)?;
-            tasks_to_cancel |= upgrade_tasks & (enqueued_tasks | failed_tasks);
-        }
         // 1. Remove from this list the tasks that we are not allowed to cancel
         // Notice that only the _enqueued_ ones are cancelable and we should
         // have already aborted the indexation of the _processing_ ones
-        tasks_to_cancel |= enqueued_tasks & matched_tasks;
+        let cancelable_tasks = self.queue.tasks.get_status(rtxn, Status::Enqueued)?;
+        let tasks_to_cancel = cancelable_tasks & matched_tasks;

-        // 2. If we're canceling an upgrade, attempt the rollback
-        if let Some(latest_upgrade_task) = (&tasks_to_cancel & upgrade_tasks).max() {
-            progress.update_progress(TaskCancelationProgress::CancelingUpgrade);
-
-            let task = self.queue.tasks.get_task(rtxn, latest_upgrade_task)?.unwrap();
-            let Some(Details::UpgradeDatabase { from, to }) = task.details else {
-                unreachable!("wrong details for upgrade task {latest_upgrade_task}")
-            };
-
-            // check that we are rollbacking an upgrade to the current Meilisearch
-            let bin_major: u32 = meilisearch_types::versioning::VERSION_MAJOR;
-            let bin_minor: u32 = meilisearch_types::versioning::VERSION_MINOR;
-            let bin_patch: u32 = meilisearch_types::versioning::VERSION_PATCH;
-
-            if to == (bin_major, bin_minor, bin_patch) {
-                tracing::warn!(
-                    "Rollbacking from v{}.{}.{} to v{}.{}.{}",
-                    to.0,
-                    to.1,
-                    to.2,
-                    from.0,
-                    from.1,
-                    from.2
-                );
-                match std::panic::catch_unwind(std::panic::AssertUnwindSafe(|| {
-                    self.process_rollback(from, progress)
-                })) {
-                    Ok(Ok(())) => {}
-                    Ok(Err(err)) => return Err(Error::DatabaseUpgrade(Box::new(err))),
-                    Err(e) => {
-                        let msg = match e.downcast_ref::<&'static str>() {
-                            Some(s) => *s,
-                            None => match e.downcast_ref::<String>() {
-                                Some(s) => &s[..],
-                                None => "Box<dyn Any>",
-                            },
-                        };
-                        return Err(Error::DatabaseUpgrade(Box::new(Error::ProcessBatchPanicked(
-                            msg.to_string(),
-                        ))));
-                    }
-                }
-            } else {
-                tracing::debug!(
-                    "Not rollbacking an upgrade targetting the earlier version v{}.{}.{}",
-                    bin_major,
-                    bin_minor,
-                    bin_patch
-                )
-            }
-        }
-
-        // 3. We now have a list of tasks to cancel, cancel them
         let (task_progress, progress_obj) = AtomicTaskStep::new(tasks_to_cancel.len() as u32);
         progress.update_progress(progress_obj);

+        // 2. We now have a list of tasks to cancel, cancel them
         let mut tasks = self.queue.tasks.get_existing_tasks(
             rtxn,
             tasks_to_cancel.iter().inspect(|_| {
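Both the upgrade arm and the rollback block above convert panics into scheduler errors by downcasting the panic payload. A hedged sketch of that pattern on its own, with a stand-in error type:

```rust
// Hedged sketch: stand-in error type; the downcast chain mirrors the hunks above.
use std::panic::{catch_unwind, AssertUnwindSafe};

#[derive(Debug)]
enum SchedulerError {
    DatabaseUpgrade(String),
}

fn run_guarded(step: impl FnOnce() -> Result<(), String>) -> Result<(), SchedulerError> {
    match catch_unwind(AssertUnwindSafe(step)) {
        Ok(Ok(())) => Ok(()),
        Ok(Err(e)) => Err(SchedulerError::DatabaseUpgrade(e)),
        Err(payload) => {
            // The payload of a panic is an opaque Box<dyn Any>; in practice it is a
            // &'static str or a String, which is exactly what the diff downcasts to.
            let msg = match payload.downcast_ref::<&'static str>() {
                Some(s) => *s,
                None => match payload.downcast_ref::<String>() {
                    Some(s) => &s[..],
                    None => "Box<dyn Any>",
                },
            };
            Err(SchedulerError::DatabaseUpgrade(msg.to_string()))
        }
    }
}

fn main() {
    let res = run_guarded(|| panic!("upgrade step panicked"));
    println!("{res:?}");
}
```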
@@ -32,7 +32,7 @@ impl IndexScheduler {
         index_wtxn: &mut RwTxn<'i>,
         index: &'i Index,
         operation: IndexOperation,
-        progress: &Progress,
+        progress: Progress,
     ) -> Result<(Vec<Task>, Option<ChannelCongestion>)> {
         let indexer_alloc = Bump::new();
         let started_processing_at = std::time::Instant::now();
@@ -186,7 +186,7 @@ impl IndexScheduler {
                             &document_changes,
                             embedders,
                             &|| must_stop_processing.get(),
-                            progress,
+                            &progress,
                         )
                         .map_err(|e| Error::from_milli(e, Some(index_uid.clone())))?,
                     );

@@ -307,7 +307,7 @@ impl IndexScheduler {
                             &document_changes,
                             embedders,
                             &|| must_stop_processing.get(),
-                            progress,
+                            &progress,
                         )
                         .map_err(|err| Error::from_milli(err, Some(index_uid.clone())))?,
                     );

@@ -465,7 +465,7 @@ impl IndexScheduler {
                             &document_changes,
                             embedders,
                             &|| must_stop_processing.get(),
-                            progress,
+                            &progress,
                         )
                         .map_err(|err| Error::from_milli(err, Some(index_uid.clone())))?,
                     );

@@ -520,7 +520,7 @@ impl IndexScheduler {
                         index_uid: index_uid.clone(),
                         tasks: cleared_tasks,
                     },
-                    progress,
+                    progress.clone(),
                 )?;

                 let (settings_tasks, _congestion) = self.apply_index_operation(
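The hunks above only change how `Progress` is passed: by reference on the `-` side, by value (plus a `clone()` at the first call site) on the `+` side. A trivial hedged sketch of the difference, with `Progress` stubbed out:

```rust
// Hedged sketch: Progress is a dummy stand-in, not the milli type.
#[derive(Clone, Default)]
struct Progress;

fn by_ref(_progress: &Progress) {}
fn by_value(_progress: Progress) {}

fn main() {
    let progress = Progress::default();

    // `-` side: the same handle can be borrowed for two consecutive operations.
    by_ref(&progress);
    by_ref(&progress);

    // `+` side: the first of two chained calls needs `progress.clone()`.
    by_value(progress.clone());
    by_value(progress);
}
```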
@@ -41,7 +41,7 @@ impl IndexScheduler {
         progress.update_progress(SnapshotCreationProgress::SnapshotTheIndexScheduler);
         let dst = temp_snapshot_dir.path().join("tasks");
         fs::create_dir_all(&dst)?;
-        self.env.copy_to_path(dst.join("data.mdb"), CompactionOption::Disabled)?;
+        self.env.copy_to_path(dst.join("data.mdb"), CompactionOption::Enabled)?;

         // 2.2 Create a read transaction on the index-scheduler
         let rtxn = self.env.read_txn()?;

@@ -80,7 +80,7 @@ impl IndexScheduler {
             let dst = temp_snapshot_dir.path().join("indexes").join(uuid.to_string());
             fs::create_dir_all(&dst)?;
             index
-                .copy_to_path(dst.join("data.mdb"), CompactionOption::Disabled)
+                .copy_to_path(dst.join("data.mdb"), CompactionOption::Enabled)
                 .map_err(|e| Error::from_milli(e, Some(name.to_string())))?;
         }

@@ -90,7 +90,7 @@ impl IndexScheduler {
         progress.update_progress(SnapshotCreationProgress::SnapshotTheApiKeys);
         let dst = temp_snapshot_dir.path().join("auth");
         fs::create_dir_all(&dst)?;
-        self.scheduler.auth_env.copy_to_path(dst.join("data.mdb"), CompactionOption::Disabled)?;
+        self.scheduler.auth_env.copy_to_path(dst.join("data.mdb"), CompactionOption::Enabled)?;

         // 5. Copy and tarball the flat snapshot
         progress.update_progress(SnapshotCreationProgress::CreateTheTarball);
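The three hunks above flip the same switch: whether the LMDB environments are compacted while being copied into the snapshot. A hedged sketch with heed stubbed out; compaction omits free pages and renumbers pages sequentially (smaller snapshot, slower copy), while the disabled mode copies the environment as-is:

```rust
// Hedged sketch: FakeEnv stands in for a heed environment so this compiles alone.
enum CompactionOption {
    Enabled,
    Disabled,
}

struct FakeEnv;

impl FakeEnv {
    fn copy_to_path(&self, dst: &std::path::Path, option: CompactionOption) -> std::io::Result<()> {
        // A real environment would stream its data file to `dst` here, dropping
        // free pages when compaction is enabled.
        let _ = (dst, matches!(option, CompactionOption::Enabled));
        Ok(())
    }
}

fn main() -> std::io::Result<()> {
    let env = FakeEnv;
    // `-` side of the diff: no compaction. `+` side: compaction enabled.
    env.copy_to_path(std::path::Path::new("/tmp/snapshot/data.mdb"), CompactionOption::Disabled)?;
    Ok(())
}
```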
@@ -12,14 +12,10 @@ impl IndexScheduler {
         #[cfg(test)]
         self.maybe_fail(crate::test_utils::FailureLocation::ProcessUpgrade)?;

+        enum UpgradeIndex {}
         let indexes = self.index_names()?;

         for (i, uid) in indexes.iter().enumerate() {
-            let must_stop_processing = self.scheduler.must_stop_processing.clone();
-
-            if must_stop_processing.get() {
-                return Err(Error::AbortedTask);
-            }
             progress.update_progress(VariableNameStep::<UpgradeIndex>::new(
                 format!("Upgrading index `{uid}`"),
                 i as u32,

@@ -31,7 +27,6 @@ impl IndexScheduler {
                 &mut index_wtxn,
                 &index,
                 db_version,
-                || must_stop_processing.get(),
                 progress.clone(),
             )
             .map_err(|e| Error::from_milli(e, Some(uid.to_string())))?;
@@ -51,42 +46,4 @@ impl IndexScheduler {

         Ok(())
     }
-
-    pub fn process_rollback(&self, db_version: (u32, u32, u32), progress: &Progress) -> Result<()> {
-        let mut wtxn = self.env.write_txn()?;
-        tracing::info!(?db_version, "roll back index scheduler version");
-        self.version.set_version(&mut wtxn, db_version)?;
-        let db_path = self.scheduler.version_file_path.parent().unwrap();
-        wtxn.commit()?;
-
-        let indexes = self.index_names()?;
-
-        tracing::info!("roll backing all indexes");
-        for (i, uid) in indexes.iter().enumerate() {
-            progress.update_progress(VariableNameStep::<UpgradeIndex>::new(
-                format!("Rollbacking index `{uid}`"),
-                i as u32,
-                indexes.len() as u32,
-            ));
-            let index_schd_rtxn = self.env.read_txn()?;
-
-            let rollback_outcome =
-                self.index_mapper.rollback_index(&index_schd_rtxn, uid, db_version)?;
-            if !rollback_outcome.succeeded() {
-                return Err(crate::Error::RollbackFailed { index: uid.clone(), rollback_outcome });
-            }
-        }
-
-        tracing::info!(?db_path, ?db_version, "roll back version file");
-        meilisearch_types::versioning::create_version_file(
-            db_path,
-            db_version.0,
-            db_version.1,
-            db_version.2,
-        )?;
-
-        Ok(())
-    }
 }

-enum UpgradeIndex {}
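The `process_rollback` function removed above walks every index, rolls each one back to the target version, and only then rewrites the on-disk version file. A hedged sketch of that control flow with simplified stand-in types:

```rust
// Hedged sketch: stand-in types, not the real index mapper or versioning helpers.
type Version = (u32, u32, u32);

#[derive(Debug)]
struct RollbackOutcome {
    succeeded: bool,
}

#[derive(Debug)]
enum RollbackError {
    Failed { index: String },
}

fn rollback_index(_uid: &str, _to: Version) -> RollbackOutcome {
    RollbackOutcome { succeeded: true }
}

fn rollback_all(indexes: &[String], to: Version) -> Result<(), RollbackError> {
    for (i, uid) in indexes.iter().enumerate() {
        // The real code reports progress here ("Rollbacking index `{uid}`", step i of n).
        let _ = i;
        let outcome = rollback_index(uid, to);
        if !outcome.succeeded {
            return Err(RollbackError::Failed { index: uid.clone() });
        }
    }
    // Only once every index is rolled back does the real code rewrite the version file.
    Ok(())
}

fn main() {
    let indexes = vec!["movies".to_string()];
    println!("{:?}", rollback_all(&indexes, (1, 13, 3)));
}
```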
@@ -1,5 +1,6 @@
 ---
 source: crates/index-scheduler/src/scheduler/test.rs
+snapshot_kind: text
 ---
 ### Autobatching Enabled = true
 ### Processing batch None:

@@ -39,7 +40,7 @@ catto [0,]
 [timestamp] [0,1,]
 ----------------------------------------------------------------------
 ### All Batches:
-0 {uid: 0, details: {"receivedDocuments":1,"indexedDocuments":0,"matchedTasks":1,"canceledTasks":1,"originalFilter":"test_query"}, stats: {"totalNbTasks":2,"status":{"succeeded":1,"canceled":1},"types":{"documentAdditionOrUpdate":1,"taskCancelation":1},"indexUids":{"catto":1}}, stop reason: "task with id 1 of type `taskCancelation` cannot be batched", }
+0 {uid: 0, details: {"receivedDocuments":1,"indexedDocuments":0,"matchedTasks":1,"canceledTasks":1,"originalFilter":"test_query"}, stats: {"totalNbTasks":2,"status":{"succeeded":1,"canceled":1},"types":{"documentAdditionOrUpdate":1,"taskCancelation":1},"indexUids":{"catto":1}}, }
 ----------------------------------------------------------------------
 ### Batch to tasks mapping:
 0 [0,1,]
@@ -1,10 +1,11 @@
 ---
 source: crates/index-scheduler/src/scheduler/test.rs
+snapshot_kind: text
 ---
 ### Autobatching Enabled = true
 ### Processing batch Some(1):
 [1,]
-{uid: 1, details: {"receivedDocuments":1,"indexedDocuments":null}, stats: {"totalNbTasks":1,"status":{"processing":1},"types":{"documentAdditionOrUpdate":1},"indexUids":{"beavero":1}}, stop reason: "batched all enqueued tasks for index `beavero`", }
+{uid: 1, details: {"receivedDocuments":1,"indexedDocuments":null}, stats: {"totalNbTasks":1,"status":{"processing":1},"types":{"documentAdditionOrUpdate":1},"indexUids":{"beavero":1}}, }
 ----------------------------------------------------------------------
 ### All Tasks:
 0 {uid: 0, batch_uid: 0, status: succeeded, details: { received_documents: 1, indexed_documents: Some(1) }, kind: DocumentAdditionOrUpdate { index_uid: "catto", primary_key: None, method: ReplaceDocuments, content_file: 00000000-0000-0000-0000-000000000000, documents_count: 1, allow_index_creation: true }}

@@ -46,7 +47,7 @@ catto: { number_of_documents: 1, field_distribution: {"id": 1} }
 [timestamp] [0,]
 ----------------------------------------------------------------------
 ### All Batches:
-0 {uid: 0, details: {"receivedDocuments":1,"indexedDocuments":1}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"documentAdditionOrUpdate":1},"indexUids":{"catto":1}}, stop reason: "batched all enqueued tasks for index `catto`", }
+0 {uid: 0, details: {"receivedDocuments":1,"indexedDocuments":1}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"documentAdditionOrUpdate":1},"indexUids":{"catto":1}}, }
 ----------------------------------------------------------------------
 ### Batch to tasks mapping:
 0 [0,]
@@ -1,5 +1,6 @@
 ---
 source: crates/index-scheduler/src/scheduler/test.rs
+snapshot_kind: text
 ---
 ### Autobatching Enabled = true
 ### Processing batch None:

@@ -49,8 +50,8 @@ catto: { number_of_documents: 1, field_distribution: {"id": 1} }
 [timestamp] [1,2,3,]
 ----------------------------------------------------------------------
 ### All Batches:
-0 {uid: 0, details: {"receivedDocuments":1,"indexedDocuments":1}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"documentAdditionOrUpdate":1},"indexUids":{"catto":1}}, stop reason: "batched all enqueued tasks for index `catto`", }
-1 {uid: 1, details: {"receivedDocuments":2,"indexedDocuments":0,"matchedTasks":3,"canceledTasks":2,"originalFilter":"test_query"}, stats: {"totalNbTasks":3,"status":{"succeeded":1,"canceled":2},"types":{"documentAdditionOrUpdate":2,"taskCancelation":1},"indexUids":{"beavero":1,"wolfo":1}}, stop reason: "task with id 3 of type `taskCancelation` cannot be batched", }
+0 {uid: 0, details: {"receivedDocuments":1,"indexedDocuments":1}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"documentAdditionOrUpdate":1},"indexUids":{"catto":1}}, }
+1 {uid: 1, details: {"receivedDocuments":2,"indexedDocuments":0,"matchedTasks":3,"canceledTasks":2,"originalFilter":"test_query"}, stats: {"totalNbTasks":3,"status":{"succeeded":1,"canceled":2},"types":{"documentAdditionOrUpdate":2,"taskCancelation":1},"indexUids":{"beavero":1,"wolfo":1}}, }
 ----------------------------------------------------------------------
 ### Batch to tasks mapping:
 0 [0,]
@@ -1,5 +1,6 @@
 ---
 source: crates/index-scheduler/src/scheduler/test.rs
+snapshot_kind: text
 ---
 ### Autobatching Enabled = true
 ### Processing batch None:

@@ -41,7 +42,7 @@ catto: { number_of_documents: 1, field_distribution: {"id": 1} }
 [timestamp] [0,]
 ----------------------------------------------------------------------
 ### All Batches:
-0 {uid: 0, details: {"receivedDocuments":1,"indexedDocuments":1}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"documentAdditionOrUpdate":1},"indexUids":{"catto":1}}, stop reason: "batched all enqueued tasks for index `catto`", }
+0 {uid: 0, details: {"receivedDocuments":1,"indexedDocuments":1}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"documentAdditionOrUpdate":1},"indexUids":{"catto":1}}, }
 ----------------------------------------------------------------------
 ### Batch to tasks mapping:
 0 [0,]
@@ -1,10 +1,11 @@
 ---
 source: crates/index-scheduler/src/scheduler/test.rs
+snapshot_kind: text
 ---
 ### Autobatching Enabled = true
 ### Processing batch Some(1):
 [1,]
-{uid: 1, details: {"receivedDocuments":1,"indexedDocuments":null}, stats: {"totalNbTasks":1,"status":{"processing":1},"types":{"documentAdditionOrUpdate":1},"indexUids":{"beavero":1}}, stop reason: "batched all enqueued tasks for index `beavero`", }
+{uid: 1, details: {"receivedDocuments":1,"indexedDocuments":null}, stats: {"totalNbTasks":1,"status":{"processing":1},"types":{"documentAdditionOrUpdate":1},"indexUids":{"beavero":1}}, }
 ----------------------------------------------------------------------
 ### All Tasks:
 0 {uid: 0, batch_uid: 0, status: succeeded, details: { received_documents: 1, indexed_documents: Some(1) }, kind: DocumentAdditionOrUpdate { index_uid: "catto", primary_key: None, method: ReplaceDocuments, content_file: 00000000-0000-0000-0000-000000000000, documents_count: 1, allow_index_creation: true }}

@@ -45,7 +46,7 @@ catto: { number_of_documents: 1, field_distribution: {"id": 1} }
 [timestamp] [0,]
 ----------------------------------------------------------------------
 ### All Batches:
-0 {uid: 0, details: {"receivedDocuments":1,"indexedDocuments":1}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"documentAdditionOrUpdate":1},"indexUids":{"catto":1}}, stop reason: "batched all enqueued tasks for index `catto`", }
+0 {uid: 0, details: {"receivedDocuments":1,"indexedDocuments":1}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"documentAdditionOrUpdate":1},"indexUids":{"catto":1}}, }
 ----------------------------------------------------------------------
 ### Batch to tasks mapping:
 0 [0,]
@@ -1,5 +1,6 @@
 ---
 source: crates/index-scheduler/src/scheduler/test.rs
+snapshot_kind: text
 ---
 ### Autobatching Enabled = true
 ### Processing batch None:

@@ -38,7 +39,7 @@ canceled [0,]
 [timestamp] [0,1,]
 ----------------------------------------------------------------------
 ### All Batches:
-0 {uid: 0, details: {"matchedTasks":1,"canceledTasks":1,"originalFilter":"cancel dump"}, stats: {"totalNbTasks":2,"status":{"succeeded":1,"canceled":1},"types":{"taskCancelation":1,"dumpCreation":1},"indexUids":{}}, stop reason: "task with id 1 of type `taskCancelation` cannot be batched", }
+0 {uid: 0, details: {"matchedTasks":1,"canceledTasks":1,"originalFilter":"cancel dump"}, stats: {"totalNbTasks":2,"status":{"succeeded":1,"canceled":1},"types":{"taskCancelation":1,"dumpCreation":1},"indexUids":{}}, }
 ----------------------------------------------------------------------
 ### Batch to tasks mapping:
 0 [0,1,]
@@ -1,10 +1,11 @@
 ---
 source: crates/index-scheduler/src/scheduler/test.rs
+snapshot_kind: text
 ---
 ### Autobatching Enabled = true
 ### Processing batch Some(0):
 [0,]
-{uid: 0, details: {"dumpUid":null}, stats: {"totalNbTasks":1,"status":{"processing":1},"types":{"dumpCreation":1},"indexUids":{}}, stop reason: "task with id 0 of type `dumpCreation` cannot be batched", }
+{uid: 0, details: {"dumpUid":null}, stats: {"totalNbTasks":1,"status":{"processing":1},"types":{"dumpCreation":1},"indexUids":{}}, }
 ----------------------------------------------------------------------
 ### All Tasks:
 0 {uid: 0, status: enqueued, details: { dump_uid: None }, kind: DumpCreation { keys: [], instance_uid: None }}
@@ -1,10 +1,11 @@
 ---
 source: crates/index-scheduler/src/scheduler/test.rs
+snapshot_kind: text
 ---
 ### Autobatching Enabled = true
 ### Processing batch Some(0):
 [0,]
-{uid: 0, details: {"receivedDocuments":1,"indexedDocuments":null}, stats: {"totalNbTasks":1,"status":{"processing":1},"types":{"documentAdditionOrUpdate":1},"indexUids":{"catto":1}}, stop reason: "batched all enqueued tasks", }
+{uid: 0, details: {"receivedDocuments":1,"indexedDocuments":null}, stats: {"totalNbTasks":1,"status":{"processing":1},"types":{"documentAdditionOrUpdate":1},"indexUids":{"catto":1}}, }
 ----------------------------------------------------------------------
 ### All Tasks:
 0 {uid: 0, status: enqueued, details: { received_documents: 1, indexed_documents: None }, kind: DocumentAdditionOrUpdate { index_uid: "catto", primary_key: None, method: ReplaceDocuments, content_file: 00000000-0000-0000-0000-000000000000, documents_count: 1, allow_index_creation: true }}
@@ -1,5 +1,6 @@
 ---
 source: crates/index-scheduler/src/scheduler/test.rs
+snapshot_kind: text
 ---
 ### Autobatching Enabled = true
 ### Processing batch None:

@@ -40,7 +41,7 @@ catto: { number_of_documents: 0, field_distribution: {} }
 [timestamp] [0,1,]
 ----------------------------------------------------------------------
 ### All Batches:
-0 {uid: 0, details: {"receivedDocuments":1,"indexedDocuments":0,"matchedTasks":1,"canceledTasks":1,"originalFilter":"test_query"}, stats: {"totalNbTasks":2,"status":{"succeeded":1,"canceled":1},"types":{"documentAdditionOrUpdate":1,"taskCancelation":1},"indexUids":{"catto":1}}, stop reason: "task with id 1 of type `taskCancelation` cannot be batched", }
+0 {uid: 0, details: {"receivedDocuments":1,"indexedDocuments":0,"matchedTasks":1,"canceledTasks":1,"originalFilter":"test_query"}, stats: {"totalNbTasks":2,"status":{"succeeded":1,"canceled":1},"types":{"documentAdditionOrUpdate":1,"taskCancelation":1},"indexUids":{"catto":1}}, }
 ----------------------------------------------------------------------
 ### Batch to tasks mapping:
 0 [0,1,]
@@ -1,10 +1,11 @@
 ---
 source: crates/index-scheduler/src/scheduler/test.rs
+snapshot_kind: text
 ---
 ### Autobatching Enabled = true
 ### Processing batch Some(0):
 [0,]
-{uid: 0, details: {"receivedDocuments":1,"indexedDocuments":null}, stats: {"totalNbTasks":1,"status":{"processing":1},"types":{"documentAdditionOrUpdate":1},"indexUids":{"catto":1}}, stop reason: "batched all enqueued tasks", }
+{uid: 0, details: {"receivedDocuments":1,"indexedDocuments":null}, stats: {"totalNbTasks":1,"status":{"processing":1},"types":{"documentAdditionOrUpdate":1},"indexUids":{"catto":1}}, }
 ----------------------------------------------------------------------
 ### All Tasks:
 0 {uid: 0, status: enqueued, details: { received_documents: 1, indexed_documents: None }, kind: DocumentAdditionOrUpdate { index_uid: "catto", primary_key: None, method: ReplaceDocuments, content_file: 00000000-0000-0000-0000-000000000000, documents_count: 1, allow_index_creation: true }}
@@ -1,10 +1,11 @@
 ---
 source: crates/index-scheduler/src/scheduler/test.rs
+snapshot_kind: text
 ---
 ### Autobatching Enabled = true
 ### Processing batch Some(0):
 [0,]
-{uid: 0, details: {"receivedDocuments":1,"indexedDocuments":null}, stats: {"totalNbTasks":1,"status":{"processing":1},"types":{"documentAdditionOrUpdate":1},"indexUids":{"catto":1}}, stop reason: "batched all enqueued tasks", }
+{uid: 0, details: {"receivedDocuments":1,"indexedDocuments":null}, stats: {"totalNbTasks":1,"status":{"processing":1},"types":{"documentAdditionOrUpdate":1},"indexUids":{"catto":1}}, }
 ----------------------------------------------------------------------
 ### All Tasks:
 0 {uid: 0, status: enqueued, details: { received_documents: 1, indexed_documents: None }, kind: DocumentAdditionOrUpdate { index_uid: "catto", primary_key: None, method: ReplaceDocuments, content_file: 00000000-0000-0000-0000-000000000000, documents_count: 1, allow_index_creation: true }}
@@ -1,5 +1,6 @@
 ---
 source: crates/index-scheduler/src/scheduler/test.rs
+snapshot_kind: text
 ---
 ### Autobatching Enabled = true
 ### Processing batch None:

@@ -40,8 +41,8 @@ catto: { number_of_documents: 1, field_distribution: {"id": 1} }
 [timestamp] [1,]
 ----------------------------------------------------------------------
 ### All Batches:
-0 {uid: 0, details: {"receivedDocuments":1,"indexedDocuments":1}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"documentAdditionOrUpdate":1},"indexUids":{"catto":1}}, stop reason: "batched all enqueued tasks", }
-1 {uid: 1, details: {"matchedTasks":1,"canceledTasks":0,"originalFilter":"test_query"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"taskCancelation":1},"indexUids":{}}, stop reason: "task with id 1 of type `taskCancelation` cannot be batched", }
+0 {uid: 0, details: {"receivedDocuments":1,"indexedDocuments":1}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"documentAdditionOrUpdate":1},"indexUids":{"catto":1}}, }
+1 {uid: 1, details: {"matchedTasks":1,"canceledTasks":0,"originalFilter":"test_query"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"taskCancelation":1},"indexUids":{}}, }
 ----------------------------------------------------------------------
 ### Batch to tasks mapping:
 0 [0,]
@@ -1,5 +1,6 @@
 ---
 source: crates/index-scheduler/src/scheduler/test.rs
+snapshot_kind: text
 ---
 ### Autobatching Enabled = true
 ### Processing batch None:

@@ -35,7 +36,7 @@ catto: { number_of_documents: 1, field_distribution: {"id": 1} }
 [timestamp] [0,]
 ----------------------------------------------------------------------
 ### All Batches:
-0 {uid: 0, details: {"receivedDocuments":1,"indexedDocuments":1}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"documentAdditionOrUpdate":1},"indexUids":{"catto":1}}, stop reason: "batched all enqueued tasks", }
+0 {uid: 0, details: {"receivedDocuments":1,"indexedDocuments":1}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"documentAdditionOrUpdate":1},"indexUids":{"catto":1}}, }
 ----------------------------------------------------------------------
 ### Batch to tasks mapping:
 0 [0,]
@@ -1,5 +1,6 @@
 ---
 source: crates/index-scheduler/src/scheduler/test.rs
+snapshot_kind: text
 ---
 ### Autobatching Enabled = true
 ### Processing batch None:
@@ -60,12 +61,12 @@ girafos: { number_of_documents: 0, field_distribution: {} }
 [timestamp] [5,]
 ----------------------------------------------------------------------
 ### All Batches:
-0 {uid: 0, details: {}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"indexCreation":1},"indexUids":{"doggos":1}}, stop reason: "task with id 0 of type `indexCreation` cannot be batched", }
-1 {uid: 1, details: {}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"indexCreation":1},"indexUids":{"cattos":1}}, stop reason: "task with id 1 of type `indexCreation` cannot be batched", }
-2 {uid: 2, details: {}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"indexCreation":1},"indexUids":{"girafos":1}}, stop reason: "task with id 2 of type `indexCreation` cannot be batched", }
-3 {uid: 3, details: {"deletedDocuments":0}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"documentDeletion":1},"indexUids":{"doggos":1}}, stop reason: "batched all enqueued tasks for index `doggos`", }
-4 {uid: 4, details: {"deletedDocuments":0}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"documentDeletion":1},"indexUids":{"cattos":1}}, stop reason: "batched all enqueued tasks for index `cattos`", }
-5 {uid: 5, details: {"deletedDocuments":0}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"documentDeletion":1},"indexUids":{"girafos":1}}, stop reason: "batched all enqueued tasks", }
+0 {uid: 0, details: {}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"indexCreation":1},"indexUids":{"doggos":1}}, }
+1 {uid: 1, details: {}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"indexCreation":1},"indexUids":{"cattos":1}}, }
+2 {uid: 2, details: {}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"indexCreation":1},"indexUids":{"girafos":1}}, }
+3 {uid: 3, details: {"deletedDocuments":0}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"documentDeletion":1},"indexUids":{"doggos":1}}, }
+4 {uid: 4, details: {"deletedDocuments":0}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"documentDeletion":1},"indexUids":{"cattos":1}}, }
+5 {uid: 5, details: {"deletedDocuments":0}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"documentDeletion":1},"indexUids":{"girafos":1}}, }
 ----------------------------------------------------------------------
 ### Batch to tasks mapping:
 0 [0,]
@@ -1,5 +1,6 @@
 ---
 source: crates/index-scheduler/src/scheduler/test.rs
+snapshot_kind: text
 ---
 ### Autobatching Enabled = true
 ### Processing batch None:
@@ -41,7 +42,7 @@ doggos: { number_of_documents: 0, field_distribution: {} }
 [timestamp] [0,]
 ----------------------------------------------------------------------
 ### All Batches:
-0 {uid: 0, details: {}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"indexCreation":1},"indexUids":{"doggos":1}}, stop reason: "task with id 0 of type `indexCreation` cannot be batched", }
+0 {uid: 0, details: {}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"indexCreation":1},"indexUids":{"doggos":1}}, }
 ----------------------------------------------------------------------
 ### Batch to tasks mapping:
 0 [0,]
@@ -1,5 +1,6 @@
 ---
 source: crates/index-scheduler/src/scheduler/test.rs
+snapshot_kind: text
 ---
 ### Autobatching Enabled = true
 ### Processing batch None:
@@ -42,8 +43,8 @@ doggos [0,1,2,]
 [timestamp] [1,2,]
 ----------------------------------------------------------------------
 ### All Batches:
-0 {uid: 0, details: {}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"indexCreation":1},"indexUids":{"doggos":1}}, stop reason: "task with id 0 of type `indexCreation` cannot be batched", }
-1 {uid: 1, details: {"receivedDocuments":1,"indexedDocuments":0,"deletedDocuments":0}, stats: {"totalNbTasks":2,"status":{"succeeded":2},"types":{"documentAdditionOrUpdate":1,"indexDeletion":1},"indexUids":{"doggos":2}}, stop reason: "task with id 2 deletes the index", }
+0 {uid: 0, details: {}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"indexCreation":1},"indexUids":{"doggos":1}}, }
+1 {uid: 1, details: {"receivedDocuments":1,"indexedDocuments":0,"deletedDocuments":0}, stats: {"totalNbTasks":2,"status":{"succeeded":2},"types":{"documentAdditionOrUpdate":1,"indexDeletion":1},"indexUids":{"doggos":2}}, }
 ----------------------------------------------------------------------
 ### Batch to tasks mapping:
 0 [0,]
@@ -1,5 +1,6 @@
 ---
 source: crates/index-scheduler/src/scheduler/test.rs
+snapshot_kind: text
 ---
 ### Autobatching Enabled = true
 ### Processing batch None:
@@ -37,7 +38,7 @@ doggos [0,1,]
 [timestamp] [0,1,]
 ----------------------------------------------------------------------
 ### All Batches:
-0 {uid: 0, details: {"receivedDocuments":1,"indexedDocuments":0,"deletedDocuments":0}, stats: {"totalNbTasks":2,"status":{"succeeded":2},"types":{"documentAdditionOrUpdate":1,"indexDeletion":1},"indexUids":{"doggos":2}}, stop reason: "task with id 1 deletes the index", }
+0 {uid: 0, details: {"receivedDocuments":1,"indexedDocuments":0,"deletedDocuments":0}, stats: {"totalNbTasks":2,"status":{"succeeded":2},"types":{"documentAdditionOrUpdate":1,"indexDeletion":1},"indexUids":{"doggos":2}}, }
 ----------------------------------------------------------------------
 ### Batch to tasks mapping:
 0 [0,1,]
@@ -1,10 +1,11 @@
 ---
 source: crates/index-scheduler/src/scheduler/test.rs
+snapshot_kind: text
 ---
 ### Autobatching Enabled = true
 ### Processing batch Some(0):
 [0,]
-{uid: 0, details: {"primaryKey":"id"}, stats: {"totalNbTasks":1,"status":{"processing":1},"types":{"indexCreation":1},"indexUids":{"index_a":1}}, stop reason: "task with id 0 of type `indexCreation` cannot be batched", }
+{uid: 0, details: {"primaryKey":"id"}, stats: {"totalNbTasks":1,"status":{"processing":1},"types":{"indexCreation":1},"indexUids":{"index_a":1}}, }
 ----------------------------------------------------------------------
 ### All Tasks:
 0 {uid: 0, status: enqueued, details: { primary_key: Some("id") }, kind: IndexCreation { index_uid: "index_a", primary_key: Some("id") }}
@@ -1,10 +1,11 @@
 ---
 source: crates/index-scheduler/src/scheduler/test.rs
+snapshot_kind: text
 ---
 ### Autobatching Enabled = true
 ### Processing batch Some(0):
 [0,]
-{uid: 0, details: {"primaryKey":"id"}, stats: {"totalNbTasks":1,"status":{"processing":1},"types":{"indexCreation":1},"indexUids":{"index_a":1}}, stop reason: "task with id 0 of type `indexCreation` cannot be batched", }
+{uid: 0, details: {"primaryKey":"id"}, stats: {"totalNbTasks":1,"status":{"processing":1},"types":{"indexCreation":1},"indexUids":{"index_a":1}}, }
 ----------------------------------------------------------------------
 ### All Tasks:
 0 {uid: 0, status: enqueued, details: { primary_key: Some("id") }, kind: IndexCreation { index_uid: "index_a", primary_key: Some("id") }}
@@ -1,10 +1,11 @@
 ---
 source: crates/index-scheduler/src/scheduler/test.rs
+snapshot_kind: text
 ---
 ### Autobatching Enabled = true
 ### Processing batch Some(0):
 [0,]
-{uid: 0, details: {"primaryKey":"id"}, stats: {"totalNbTasks":1,"status":{"processing":1},"types":{"indexCreation":1},"indexUids":{"index_a":1}}, stop reason: "task with id 0 of type `indexCreation` cannot be batched", }
+{uid: 0, details: {"primaryKey":"id"}, stats: {"totalNbTasks":1,"status":{"processing":1},"types":{"indexCreation":1},"indexUids":{"index_a":1}}, }
 ----------------------------------------------------------------------
 ### All Tasks:
 0 {uid: 0, status: enqueued, details: { primary_key: Some("id") }, kind: IndexCreation { index_uid: "index_a", primary_key: Some("id") }}
@@ -1,5 +1,6 @@
 ---
 source: crates/index-scheduler/src/scheduler/test.rs
+snapshot_kind: text
 ---
 ### Autobatching Enabled = true
 ### Processing batch None:
@@ -41,7 +42,7 @@ doggos: { number_of_documents: 0, field_distribution: {} }
 [timestamp] [0,]
 ----------------------------------------------------------------------
 ### All Batches:
-0 {uid: 0, details: {}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"indexCreation":1},"indexUids":{"doggos":1}}, stop reason: "task with id 0 of type `indexCreation` cannot be batched", }
+0 {uid: 0, details: {}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"indexCreation":1},"indexUids":{"doggos":1}}, }
 ----------------------------------------------------------------------
 ### Batch to tasks mapping:
 0 [0,]
@@ -1,5 +1,6 @@
 ---
 source: crates/index-scheduler/src/scheduler/test.rs
+snapshot_kind: text
 ---
 ### Autobatching Enabled = true
 ### Processing batch None:
@@ -44,8 +45,8 @@ doggos: { number_of_documents: 0, field_distribution: {} }
 [timestamp] [1,]
 ----------------------------------------------------------------------
 ### All Batches:
-0 {uid: 0, details: {}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"indexCreation":1},"indexUids":{"doggos":1}}, stop reason: "task with id 0 of type `indexCreation` cannot be batched", }
-1 {uid: 1, details: {}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"indexCreation":1},"indexUids":{"cattos":1}}, stop reason: "task with id 1 of type `indexCreation` cannot be batched", }
+0 {uid: 0, details: {}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"indexCreation":1},"indexUids":{"doggos":1}}, }
+1 {uid: 1, details: {}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"indexCreation":1},"indexUids":{"cattos":1}}, }
 ----------------------------------------------------------------------
 ### Batch to tasks mapping:
 0 [0,]
@@ -1,5 +1,6 @@
 ---
 source: crates/index-scheduler/src/scheduler/test.rs
+snapshot_kind: text
 ---
 ### Autobatching Enabled = true
 ### Processing batch None:
@@ -45,9 +46,9 @@ cattos: { number_of_documents: 0, field_distribution: {} }
 [timestamp] [2,]
 ----------------------------------------------------------------------
 ### All Batches:
-0 {uid: 0, details: {}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"indexCreation":1},"indexUids":{"doggos":1}}, stop reason: "task with id 0 of type `indexCreation` cannot be batched", }
-1 {uid: 1, details: {}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"indexCreation":1},"indexUids":{"cattos":1}}, stop reason: "task with id 1 of type `indexCreation` cannot be batched", }
-2 {uid: 2, details: {"deletedDocuments":0}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"indexDeletion":1},"indexUids":{"doggos":1}}, stop reason: "task with id 2 deletes the index", }
+0 {uid: 0, details: {}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"indexCreation":1},"indexUids":{"doggos":1}}, }
+1 {uid: 1, details: {}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"indexCreation":1},"indexUids":{"cattos":1}}, }
+2 {uid: 2, details: {"deletedDocuments":0}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"indexDeletion":1},"indexUids":{"doggos":1}}, }
 ----------------------------------------------------------------------
 ### Batch to tasks mapping:
 0 [0,]
@@ -1,5 +1,6 @@
 ---
 source: crates/index-scheduler/src/scheduler/test.rs
+snapshot_kind: text
 ---
 ### Autobatching Enabled = false
 ### Processing batch None:
@@ -42,7 +43,7 @@ doggos: { number_of_documents: 0, field_distribution: {} }
 [timestamp] [0,]
 ----------------------------------------------------------------------
 ### All Batches:
-0 {uid: 0, details: {}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"indexCreation":1},"indexUids":{"doggos":1}}, stop reason: "task with id 0 of type `indexCreation` cannot be batched", }
+0 {uid: 0, details: {}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"indexCreation":1},"indexUids":{"doggos":1}}, }
 ----------------------------------------------------------------------
 ### Batch to tasks mapping:
 0 [0,]
@@ -1,5 +1,6 @@
 ---
 source: crates/index-scheduler/src/scheduler/test.rs
+snapshot_kind: text
 ---
 ### Autobatching Enabled = false
 ### Processing batch None:
@@ -48,10 +49,10 @@ doggos: { number_of_documents: 0, field_distribution: {} }
 [timestamp] [3,]
 ----------------------------------------------------------------------
 ### All Batches:
-0 {uid: 0, details: {}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"indexCreation":1},"indexUids":{"doggos":1}}, stop reason: "task with id 0 of type `indexCreation` cannot be batched", }
-1 {uid: 1, details: {"deletedDocuments":0}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"documentDeletion":1},"indexUids":{"doggos":1}}, stop reason: "reached configured batch limit of 1 tasks", }
-2 {uid: 2, details: {"deletedDocuments":0}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"documentDeletion":1},"indexUids":{"doggos":1}}, stop reason: "reached configured batch limit of 1 tasks", }
-3 {uid: 3, details: {"deletedDocuments":0}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"documentDeletion":1},"indexUids":{"doggos":1}}, stop reason: "batched all enqueued tasks", }
+0 {uid: 0, details: {}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"indexCreation":1},"indexUids":{"doggos":1}}, }
+1 {uid: 1, details: {"deletedDocuments":0}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"documentDeletion":1},"indexUids":{"doggos":1}}, }
+2 {uid: 2, details: {"deletedDocuments":0}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"documentDeletion":1},"indexUids":{"doggos":1}}, }
+3 {uid: 3, details: {"deletedDocuments":0}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"documentDeletion":1},"indexUids":{"doggos":1}}, }
 ----------------------------------------------------------------------
 ### Batch to tasks mapping:
 0 [0,]
@@ -1,5 +1,6 @@
 ---
 source: crates/index-scheduler/src/scheduler/test.rs
+snapshot_kind: text
 ---
 ### Autobatching Enabled = false
 ### Processing batch None:
@@ -44,8 +45,8 @@ doggos: { number_of_documents: 0, field_distribution: {} }
 [timestamp] [1,]
 ----------------------------------------------------------------------
 ### All Batches:
-0 {uid: 0, details: {}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"indexCreation":1},"indexUids":{"doggos":1}}, stop reason: "task with id 0 of type `indexCreation` cannot be batched", }
-1 {uid: 1, details: {"deletedDocuments":0}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"documentDeletion":1},"indexUids":{"doggos":1}}, stop reason: "reached configured batch limit of 1 tasks", }
+0 {uid: 0, details: {}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"indexCreation":1},"indexUids":{"doggos":1}}, }
+1 {uid: 1, details: {"deletedDocuments":0}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"documentDeletion":1},"indexUids":{"doggos":1}}, }
 ----------------------------------------------------------------------
 ### Batch to tasks mapping:
 0 [0,]
@@ -1,5 +1,6 @@
 ---
 source: crates/index-scheduler/src/scheduler/test.rs
+snapshot_kind: text
 ---
 ### Autobatching Enabled = false
 ### Processing batch None:
@@ -46,9 +47,9 @@ doggos: { number_of_documents: 0, field_distribution: {} }
 [timestamp] [2,]
 ----------------------------------------------------------------------
 ### All Batches:
-0 {uid: 0, details: {}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"indexCreation":1},"indexUids":{"doggos":1}}, stop reason: "task with id 0 of type `indexCreation` cannot be batched", }
-1 {uid: 1, details: {"deletedDocuments":0}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"documentDeletion":1},"indexUids":{"doggos":1}}, stop reason: "reached configured batch limit of 1 tasks", }
-2 {uid: 2, details: {"deletedDocuments":0}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"documentDeletion":1},"indexUids":{"doggos":1}}, stop reason: "reached configured batch limit of 1 tasks", }
+0 {uid: 0, details: {}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"indexCreation":1},"indexUids":{"doggos":1}}, }
+1 {uid: 1, details: {"deletedDocuments":0}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"documentDeletion":1},"indexUids":{"doggos":1}}, }
+2 {uid: 2, details: {"deletedDocuments":0}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"documentDeletion":1},"indexUids":{"doggos":1}}, }
 ----------------------------------------------------------------------
 ### Batch to tasks mapping:
 0 [0,]
@@ -1,5 +1,6 @@
 ---
 source: crates/index-scheduler/src/scheduler/test.rs
+snapshot_kind: text
 ---
 ### Autobatching Enabled = true
 ### Processing batch None:
@@ -44,7 +45,7 @@ a: { number_of_documents: 0, field_distribution: {} }
 [timestamp] [0,]
 ----------------------------------------------------------------------
 ### All Batches:
-0 {uid: 0, details: {"primaryKey":"id"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"indexCreation":1},"indexUids":{"a":1}}, stop reason: "task with id 0 of type `indexCreation` cannot be batched", }
+0 {uid: 0, details: {"primaryKey":"id"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"indexCreation":1},"indexUids":{"a":1}}, }
 ----------------------------------------------------------------------
 ### Batch to tasks mapping:
 0 [0,]
@@ -1,5 +1,6 @@
 ---
 source: crates/index-scheduler/src/scheduler/test.rs
+snapshot_kind: text
 ---
 ### Autobatching Enabled = true
 ### Processing batch None:
@@ -47,8 +48,8 @@ b: { number_of_documents: 0, field_distribution: {} }
 [timestamp] [1,]
 ----------------------------------------------------------------------
 ### All Batches:
-0 {uid: 0, details: {"primaryKey":"id"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"indexCreation":1},"indexUids":{"a":1}}, stop reason: "task with id 0 of type `indexCreation` cannot be batched", }
-1 {uid: 1, details: {"primaryKey":"id"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"indexCreation":1},"indexUids":{"b":1}}, stop reason: "task with id 1 of type `indexCreation` cannot be batched", }
+0 {uid: 0, details: {"primaryKey":"id"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"indexCreation":1},"indexUids":{"a":1}}, }
+1 {uid: 1, details: {"primaryKey":"id"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"indexCreation":1},"indexUids":{"b":1}}, }
 ----------------------------------------------------------------------
 ### Batch to tasks mapping:
 0 [0,]
@@ -1,5 +1,6 @@
 ---
 source: crates/index-scheduler/src/scheduler/test.rs
+snapshot_kind: text
 ---
 ### Autobatching Enabled = true
 ### Processing batch None:
@@ -50,9 +51,9 @@ c: { number_of_documents: 0, field_distribution: {} }
 [timestamp] [2,]
 ----------------------------------------------------------------------
 ### All Batches:
-0 {uid: 0, details: {"primaryKey":"id"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"indexCreation":1},"indexUids":{"a":1}}, stop reason: "task with id 0 of type `indexCreation` cannot be batched", }
-1 {uid: 1, details: {"primaryKey":"id"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"indexCreation":1},"indexUids":{"b":1}}, stop reason: "task with id 1 of type `indexCreation` cannot be batched", }
-2 {uid: 2, details: {"primaryKey":"id"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"indexCreation":1},"indexUids":{"c":1}}, stop reason: "task with id 2 of type `indexCreation` cannot be batched", }
+0 {uid: 0, details: {"primaryKey":"id"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"indexCreation":1},"indexUids":{"a":1}}, }
+1 {uid: 1, details: {"primaryKey":"id"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"indexCreation":1},"indexUids":{"b":1}}, }
+2 {uid: 2, details: {"primaryKey":"id"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"indexCreation":1},"indexUids":{"c":1}}, }
 ----------------------------------------------------------------------
 ### Batch to tasks mapping:
 0 [0,]
@@ -1,5 +1,6 @@
 ---
 source: crates/index-scheduler/src/scheduler/test.rs
+snapshot_kind: text
 ---
 ### Autobatching Enabled = true
 ### Processing batch None:
@@ -53,10 +54,10 @@ d: { number_of_documents: 0, field_distribution: {} }
 [timestamp] [3,]
 ----------------------------------------------------------------------
 ### All Batches:
-0 {uid: 0, details: {"primaryKey":"id"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"indexCreation":1},"indexUids":{"a":1}}, stop reason: "task with id 0 of type `indexCreation` cannot be batched", }
-1 {uid: 1, details: {"primaryKey":"id"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"indexCreation":1},"indexUids":{"b":1}}, stop reason: "task with id 1 of type `indexCreation` cannot be batched", }
-2 {uid: 2, details: {"primaryKey":"id"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"indexCreation":1},"indexUids":{"c":1}}, stop reason: "task with id 2 of type `indexCreation` cannot be batched", }
-3 {uid: 3, details: {"primaryKey":"id"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"indexCreation":1},"indexUids":{"d":1}}, stop reason: "task with id 3 of type `indexCreation` cannot be batched", }
+0 {uid: 0, details: {"primaryKey":"id"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"indexCreation":1},"indexUids":{"a":1}}, }
+1 {uid: 1, details: {"primaryKey":"id"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"indexCreation":1},"indexUids":{"b":1}}, }
+2 {uid: 2, details: {"primaryKey":"id"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"indexCreation":1},"indexUids":{"c":1}}, }
+3 {uid: 3, details: {"primaryKey":"id"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"indexCreation":1},"indexUids":{"d":1}}, }
 ----------------------------------------------------------------------
 ### Batch to tasks mapping:
 0 [0,]
@@ -1,5 +1,6 @@
 ---
 source: crates/index-scheduler/src/scheduler/test.rs
+snapshot_kind: text
 ---
 ### Autobatching Enabled = true
 ### Processing batch None:
@@ -60,11 +61,11 @@ d: { number_of_documents: 0, field_distribution: {} }
 [timestamp] [4,]
 ----------------------------------------------------------------------
 ### All Batches:
-0 {uid: 0, details: {"primaryKey":"id"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"indexCreation":1},"indexUids":{"a":1}}, stop reason: "task with id 0 of type `indexCreation` cannot be batched", }
-1 {uid: 1, details: {"primaryKey":"id"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"indexCreation":1},"indexUids":{"b":1}}, stop reason: "task with id 1 of type `indexCreation` cannot be batched", }
-2 {uid: 2, details: {"primaryKey":"id"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"indexCreation":1},"indexUids":{"c":1}}, stop reason: "task with id 2 of type `indexCreation` cannot be batched", }
-3 {uid: 3, details: {"primaryKey":"id"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"indexCreation":1},"indexUids":{"d":1}}, stop reason: "task with id 3 of type `indexCreation` cannot be batched", }
-4 {uid: 4, details: {"swaps":[{"indexes":["a","b"]},{"indexes":["c","d"]}]}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"indexSwap":1},"indexUids":{}}, stop reason: "task with id 4 of type `indexSwap` cannot be batched", }
+0 {uid: 0, details: {"primaryKey":"id"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"indexCreation":1},"indexUids":{"a":1}}, }
+1 {uid: 1, details: {"primaryKey":"id"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"indexCreation":1},"indexUids":{"b":1}}, }
+2 {uid: 2, details: {"primaryKey":"id"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"indexCreation":1},"indexUids":{"c":1}}, }
+3 {uid: 3, details: {"primaryKey":"id"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"indexCreation":1},"indexUids":{"d":1}}, }
+4 {uid: 4, details: {"swaps":[{"indexes":["a","b"]},{"indexes":["c","d"]}]}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"indexSwap":1},"indexUids":{}}, }
 ----------------------------------------------------------------------
 ### Batch to tasks mapping:
 0 [0,]
@@ -1,5 +1,6 @@
 ---
 source: crates/index-scheduler/src/scheduler/test.rs
+snapshot_kind: text
 ---
 ### Autobatching Enabled = true
 ### Processing batch None:
@@ -56,10 +57,10 @@ d: { number_of_documents: 0, field_distribution: {} }
 [timestamp] [3,]
 ----------------------------------------------------------------------
 ### All Batches:
-0 {uid: 0, details: {"primaryKey":"id"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"indexCreation":1},"indexUids":{"a":1}}, stop reason: "task with id 0 of type `indexCreation` cannot be batched", }
-1 {uid: 1, details: {"primaryKey":"id"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"indexCreation":1},"indexUids":{"b":1}}, stop reason: "task with id 1 of type `indexCreation` cannot be batched", }
-2 {uid: 2, details: {"primaryKey":"id"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"indexCreation":1},"indexUids":{"c":1}}, stop reason: "task with id 2 of type `indexCreation` cannot be batched", }
-3 {uid: 3, details: {"primaryKey":"id"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"indexCreation":1},"indexUids":{"d":1}}, stop reason: "task with id 3 of type `indexCreation` cannot be batched", }
+0 {uid: 0, details: {"primaryKey":"id"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"indexCreation":1},"indexUids":{"a":1}}, }
+1 {uid: 1, details: {"primaryKey":"id"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"indexCreation":1},"indexUids":{"b":1}}, }
+2 {uid: 2, details: {"primaryKey":"id"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"indexCreation":1},"indexUids":{"c":1}}, }
+3 {uid: 3, details: {"primaryKey":"id"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"indexCreation":1},"indexUids":{"d":1}}, }
 ----------------------------------------------------------------------
 ### Batch to tasks mapping:
 0 [0,]
@@ -1,5 +1,6 @@
 ---
 source: crates/index-scheduler/src/scheduler/test.rs
+snapshot_kind: text
 ---
 ### Autobatching Enabled = true
 ### Processing batch None:
@@ -62,12 +63,12 @@ d: { number_of_documents: 0, field_distribution: {} }
 [timestamp] [5,]
 ----------------------------------------------------------------------
 ### All Batches:
-0 {uid: 0, details: {"primaryKey":"id"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"indexCreation":1},"indexUids":{"a":1}}, stop reason: "task with id 0 of type `indexCreation` cannot be batched", }
-1 {uid: 1, details: {"primaryKey":"id"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"indexCreation":1},"indexUids":{"b":1}}, stop reason: "task with id 1 of type `indexCreation` cannot be batched", }
-2 {uid: 2, details: {"primaryKey":"id"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"indexCreation":1},"indexUids":{"c":1}}, stop reason: "task with id 2 of type `indexCreation` cannot be batched", }
-3 {uid: 3, details: {"primaryKey":"id"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"indexCreation":1},"indexUids":{"d":1}}, stop reason: "task with id 3 of type `indexCreation` cannot be batched", }
-4 {uid: 4, details: {"swaps":[{"indexes":["a","b"]},{"indexes":["c","d"]}]}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"indexSwap":1},"indexUids":{}}, stop reason: "task with id 4 of type `indexSwap` cannot be batched", }
-5 {uid: 5, details: {"swaps":[{"indexes":["a","c"]}]}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"indexSwap":1},"indexUids":{}}, stop reason: "task with id 5 of type `indexSwap` cannot be batched", }
+0 {uid: 0, details: {"primaryKey":"id"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"indexCreation":1},"indexUids":{"a":1}}, }
+1 {uid: 1, details: {"primaryKey":"id"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"indexCreation":1},"indexUids":{"b":1}}, }
+2 {uid: 2, details: {"primaryKey":"id"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"indexCreation":1},"indexUids":{"c":1}}, }
+3 {uid: 3, details: {"primaryKey":"id"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"indexCreation":1},"indexUids":{"d":1}}, }
+4 {uid: 4, details: {"swaps":[{"indexes":["a","b"]},{"indexes":["c","d"]}]}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"indexSwap":1},"indexUids":{}}, }
+5 {uid: 5, details: {"swaps":[{"indexes":["a","c"]}]}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"indexSwap":1},"indexUids":{}}, }
 ----------------------------------------------------------------------
 ### Batch to tasks mapping:
 0 [0,]
@@ -1,5 +1,6 @@
 ---
 source: crates/index-scheduler/src/scheduler/test.rs
+snapshot_kind: text
 ---
 ### Autobatching Enabled = true
 ### Processing batch None:
@@ -66,13 +67,13 @@ d: { number_of_documents: 0, field_distribution: {} }
 [timestamp] [6,]
 ----------------------------------------------------------------------
 ### All Batches:
-0 {uid: 0, details: {"primaryKey":"id"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"indexCreation":1},"indexUids":{"a":1}}, stop reason: "task with id 0 of type `indexCreation` cannot be batched", }
-1 {uid: 1, details: {"primaryKey":"id"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"indexCreation":1},"indexUids":{"b":1}}, stop reason: "task with id 1 of type `indexCreation` cannot be batched", }
-2 {uid: 2, details: {"primaryKey":"id"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"indexCreation":1},"indexUids":{"c":1}}, stop reason: "task with id 2 of type `indexCreation` cannot be batched", }
-3 {uid: 3, details: {"primaryKey":"id"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"indexCreation":1},"indexUids":{"d":1}}, stop reason: "task with id 3 of type `indexCreation` cannot be batched", }
-4 {uid: 4, details: {"swaps":[{"indexes":["a","b"]},{"indexes":["c","d"]}]}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"indexSwap":1},"indexUids":{}}, stop reason: "task with id 4 of type `indexSwap` cannot be batched", }
-5 {uid: 5, details: {"swaps":[{"indexes":["a","c"]}]}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"indexSwap":1},"indexUids":{}}, stop reason: "task with id 5 of type `indexSwap` cannot be batched", }
-6 {uid: 6, details: {"swaps":[]}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"indexSwap":1},"indexUids":{}}, stop reason: "task with id 6 of type `indexSwap` cannot be batched", }
+0 {uid: 0, details: {"primaryKey":"id"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"indexCreation":1},"indexUids":{"a":1}}, }
+1 {uid: 1, details: {"primaryKey":"id"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"indexCreation":1},"indexUids":{"b":1}}, }
+2 {uid: 2, details: {"primaryKey":"id"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"indexCreation":1},"indexUids":{"c":1}}, }
+3 {uid: 3, details: {"primaryKey":"id"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"indexCreation":1},"indexUids":{"d":1}}, }
+4 {uid: 4, details: {"swaps":[{"indexes":["a","b"]},{"indexes":["c","d"]}]}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"indexSwap":1},"indexUids":{}}, }
+5 {uid: 5, details: {"swaps":[{"indexes":["a","c"]}]}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"indexSwap":1},"indexUids":{}}, }
+6 {uid: 6, details: {"swaps":[]}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"indexSwap":1},"indexUids":{}}, }
 ----------------------------------------------------------------------
 ### Batch to tasks mapping:
 0 [0,]
@@ -1,5 +1,6 @@
 ---
 source: crates/index-scheduler/src/scheduler/test.rs
+snapshot_kind: text
 ---
 ### Autobatching Enabled = true
 ### Processing batch None:
@@ -58,10 +59,10 @@ d: { number_of_documents: 0, field_distribution: {} }
 [timestamp] [3,]
 ----------------------------------------------------------------------
 ### All Batches:
-0 {uid: 0, details: {"primaryKey":"id"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"indexCreation":1},"indexUids":{"a":1}}, stop reason: "task with id 0 of type `indexCreation` cannot be batched", }
-1 {uid: 1, details: {"primaryKey":"id"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"indexCreation":1},"indexUids":{"b":1}}, stop reason: "task with id 1 of type `indexCreation` cannot be batched", }
-2 {uid: 2, details: {"primaryKey":"id"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"indexCreation":1},"indexUids":{"c":1}}, stop reason: "task with id 2 of type `indexCreation` cannot be batched", }
-3 {uid: 3, details: {"primaryKey":"id"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"indexCreation":1},"indexUids":{"d":1}}, stop reason: "task with id 3 of type `indexCreation` cannot be batched", }
+0 {uid: 0, details: {"primaryKey":"id"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"indexCreation":1},"indexUids":{"a":1}}, }
+1 {uid: 1, details: {"primaryKey":"id"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"indexCreation":1},"indexUids":{"b":1}}, }
+2 {uid: 2, details: {"primaryKey":"id"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"indexCreation":1},"indexUids":{"c":1}}, }
+3 {uid: 3, details: {"primaryKey":"id"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"indexCreation":1},"indexUids":{"d":1}}, }
 ----------------------------------------------------------------------
 ### Batch to tasks mapping:
 0 [0,]
@@ -1,5 +1,6 @@
 ---
 source: crates/index-scheduler/src/scheduler/test.rs
+snapshot_kind: text
 ---
 ### Autobatching Enabled = true
 ### Processing batch None:
@@ -53,10 +54,10 @@ d: { number_of_documents: 0, field_distribution: {} }
 [timestamp] [3,]
 ----------------------------------------------------------------------
 ### All Batches:
-0 {uid: 0, details: {"primaryKey":"id"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"indexCreation":1},"indexUids":{"a":1}}, stop reason: "task with id 0 of type `indexCreation` cannot be batched", }
-1 {uid: 1, details: {"primaryKey":"id"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"indexCreation":1},"indexUids":{"b":1}}, stop reason: "task with id 1 of type `indexCreation` cannot be batched", }
-2 {uid: 2, details: {"primaryKey":"id"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"indexCreation":1},"indexUids":{"c":1}}, stop reason: "task with id 2 of type `indexCreation` cannot be batched", }
-3 {uid: 3, details: {"primaryKey":"id"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"indexCreation":1},"indexUids":{"d":1}}, stop reason: "task with id 3 of type `indexCreation` cannot be batched", }
+0 {uid: 0, details: {"primaryKey":"id"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"indexCreation":1},"indexUids":{"a":1}}, }
+1 {uid: 1, details: {"primaryKey":"id"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"indexCreation":1},"indexUids":{"b":1}}, }
+2 {uid: 2, details: {"primaryKey":"id"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"indexCreation":1},"indexUids":{"c":1}}, }
+3 {uid: 3, details: {"primaryKey":"id"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"indexCreation":1},"indexUids":{"d":1}}, }
 ----------------------------------------------------------------------
 ### Batch to tasks mapping:
 0 [0,]
Some files were not shown because too many files have changed in this diff.