Mirror of https://github.com/meilisearch/meilisearch.git, synced 2025-07-19 21:10:34 +00:00

Compare commits: latest...disable-ar (29 commits)
Commits:

c0ea1a2c5a
296ca1d58f
d43a1644b1
8cbcb1476e
79110bf7b1
3d5575a3e9
8524b59e83
ceec68cf7a
f296c325ad
df2fcac36c
5035487208
8ccd090f40
834995b293
52b8abadf4
21f7c6f5af
e937ba90c2
8607a166d0
abfce7d9b8
9e94b101ea
57b26f8441
9505f15c85
285c72a960
9a33628331
1bd57a9a94
be676f9977
fa27327db5
cd4ba395e4
22bdec7e74
96ba62da36

.github/ISSUE_TEMPLATE/sprint_issue.md (vendored, 14 changed lines)

@@ -22,20 +22,6 @@ Related product discussion:

 <!---If necessary, create a list with technical/product steps-->

-### Are you modifying a database?
-
-- [ ] If not, add the `no db change` label to your PR, and you're good to merge.
-- [ ] If yes, add the `db change` label to your PR. You'll receive a message explaining you what to do.
-
-### Reminders when modifying the API
-
-- [ ] Update the openAPI file with utoipa:
-  - [ ] If a new module has been introduced, create a new structure deriving [the OpenAPI proc-macro](https://docs.rs/utoipa/latest/utoipa/derive.OpenApi.html) and nest it in the main [openAPI structure](https://github.com/meilisearch/meilisearch/blob/f2185438eed60fa32d25b15480c5ee064f6fba4a/crates/meilisearch/src/routes/mod.rs#L64-L78).
-  - [ ] If a new route has been introduced, add the [path decorator](https://docs.rs/utoipa/latest/utoipa/attr.path.html) to it and add the route at the top of the file in its openAPI structure.
-  - [ ] If a structure which is deserialized or serialized in the API has been introduced or modified, it must derive the [`schema`](https://docs.rs/utoipa/latest/utoipa/macro.schema.html) or the [`IntoParams`](https://docs.rs/utoipa/latest/utoipa/derive.IntoParams.html) proc-macro.
-    If it's a **new** structure you must also add it to the big list of structures [in the main `OpenApi` structure](https://github.com/meilisearch/meilisearch/blob/f2185438eed60fa32d25b15480c5ee064f6fba4a/crates/meilisearch/src/routes/mod.rs#L88).
-  - [ ] Once everything is done, start Meilisearch with the swagger flag: `cargo run --features swagger`, open `http://localhost:7700/scalar` on your browser, and ensure everything works as expected.
-    - For more info, refer to [this presentation](https://pitch.com/v/generating-the-openapi-file-jrn3nh).
-
 ### Reminders when modifying the Setting API

 <!--- Special steps to remind when adding a new index setting -->
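For context on the removed utoipa checklist above, the pieces it describes fit together roughly as in the following Rust sketch. The `/health` route and all type names here are hypothetical illustrations, not Meilisearch's actual modules, and the sketch assumes a recent utoipa release with `nest` support.

```rust
use serde::{Deserialize, Serialize};
use utoipa::{IntoParams, OpenApi, ToSchema};

// A structure serialized in an API response derives `ToSchema`
// (the `schema` proc-macro linked in the checklist).
#[derive(Serialize, Deserialize, ToSchema)]
struct HealthResponse {
    status: String,
}

// Query parameters deserialized from the URL derive `IntoParams` instead.
#[derive(Deserialize, IntoParams)]
struct HealthQuery {
    verbose: Option<bool>,
}

// Each route gets the path decorator describing its method, path, and responses.
#[utoipa::path(
    get,
    path = "/health",
    params(HealthQuery),
    responses((status = 200, description = "Service status", body = HealthResponse))
)]
async fn get_health() -> HealthResponse {
    HealthResponse { status: "available".to_string() }
}

// The new module declares its own OpenAPI structure...
#[derive(OpenApi)]
#[openapi(paths(get_health), components(schemas(HealthResponse)))]
struct HealthApi;

// ...which is then nested into the main OpenApi structure of the routes module.
#[derive(OpenApi)]
#[openapi(nest((path = "/health", api = HealthApi)))]
struct MeilisearchApi;
```

The checklist's final step (`cargo run --features swagger`, then opening `http://localhost:7700/scalar`) is how the generated file is verified.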
.github/workflows/bench-manual.yml (vendored, 39 changed lines)

@@ -1,27 +1,28 @@
 name: Bench (manual)

 on:
   workflow_dispatch:
     inputs:
       workload:
-        description: "The path to the workloads to execute (workloads/...)"
+        description: 'The path to the workloads to execute (workloads/...)'
         required: true
-        default: "workloads/movies.json"
+        default: 'workloads/movies.json'

 env:
   WORKLOAD_NAME: ${{ github.event.inputs.workload }}

 jobs:
   benchmarks:
     name: Run and upload benchmarks
     runs-on: benchmarks
     timeout-minutes: 180 # 3h
     steps:
       - uses: actions/checkout@v3
-      - uses: dtolnay/rust-toolchain@1.85
+      - uses: dtolnay/rust-toolchain@1.81
         with:
           profile: minimal
+
       - name: Run benchmarks - workload ${WORKLOAD_NAME} - branch ${{ github.ref }} - commit ${{ github.sha }}
         run: |
           cargo xtask bench --api-key "${{ secrets.BENCHMARK_API_KEY }}" --dashboard-url "${{ vars.BENCHMARK_DASHBOARD_URL }}" --reason "Manual [Run #${{ github.run_id }}](https://github.com/meilisearch/meilisearch/actions/runs/${{ github.run_id }})" -- ${WORKLOAD_NAME}
.github/workflows/bench-pr.yml (vendored, 136 changed lines)

@@ -1,82 +1,82 @@
 name: Bench (PR)
 on:
   issue_comment:
     types: [created]

 permissions:
   issues: write

 env:
   GH_TOKEN: ${{ secrets.MEILI_BOT_GH_PAT }}

 jobs:
   run-benchmarks-on-comment:
     if: startsWith(github.event.comment.body, '/bench')
     name: Run and upload benchmarks
     runs-on: benchmarks
     timeout-minutes: 180 # 3h
     steps:
       - name: Check permissions
         id: permission
         env:
           PR_AUTHOR: ${{github.event.issue.user.login }}
           COMMENT_AUTHOR: ${{github.event.comment.user.login }}
           REPOSITORY: ${{github.repository}}
           PR_ID: ${{github.event.issue.number}}
         run: |
           PR_REPOSITORY=$(gh api /repos/"$REPOSITORY"/pulls/"$PR_ID" --jq .head.repo.full_name)
           if $(gh api /repos/"$REPOSITORY"/collaborators/"$PR_AUTHOR"/permission --jq .user.permissions.push)
           then
             echo "::notice title=Authentication success::PR author authenticated"
           else
             echo "::error title=Authentication error::PR author doesn't have push permission on this repository"
             exit 1
           fi
           if $(gh api /repos/"$REPOSITORY"/collaborators/"$COMMENT_AUTHOR"/permission --jq .user.permissions.push)
           then
             echo "::notice title=Authentication success::Comment author authenticated"
           else
             echo "::error title=Authentication error::Comment author doesn't have push permission on this repository"
             exit 1
           fi
           if [ "$PR_REPOSITORY" = "$REPOSITORY" ]
           then
             echo "::notice title=Authentication success::PR started from main repository"
           else
             echo "::error title=Authentication error::PR started from a fork"
             exit 1
           fi

       - name: Check for Command
         id: command
         uses: xt0rted/slash-command-action@v2
         with:
           command: bench
           reaction-type: "rocket"
           repo-token: ${{ env.GH_TOKEN }}

       - uses: xt0rted/pull-request-comment-branch@v3
         id: comment-branch
         with:
           repo_token: ${{ env.GH_TOKEN }}

       - uses: actions/checkout@v3
         if: success()
         with:
           fetch-depth: 0 # fetch full history to be able to get main commit sha
           ref: ${{ steps.comment-branch.outputs.head_ref }}

-      - uses: dtolnay/rust-toolchain@1.85
+      - uses: dtolnay/rust-toolchain@1.81
         with:
           profile: minimal

       - name: Run benchmarks on PR ${{ github.event.issue.id }}
         run: |
           cargo xtask bench --api-key "${{ secrets.BENCHMARK_API_KEY }}" \
             --dashboard-url "${{ vars.BENCHMARK_DASHBOARD_URL }}" \
             --reason "[Comment](${{ github.event.comment.html_url }}) on [#${{ github.event.issue.number }}](${{ github.event.issue.html_url }})" \
             -- ${{ steps.command.outputs.command-arguments }} > benchlinks.txt

       - name: Send comment in PR
         run: |
           gh pr comment ${{github.event.issue.number}} --body-file benchlinks.txt
.github/workflows/bench-push-indexing.yml (vendored, 33 changed lines)

@@ -1,22 +1,23 @@
 name: Indexing bench (push)

 on:
   push:
     branches:
       - main

 jobs:
   benchmarks:
     name: Run and upload benchmarks
     runs-on: benchmarks
     timeout-minutes: 180 # 3h
     steps:
       - uses: actions/checkout@v3
-      - uses: dtolnay/rust-toolchain@1.85
+      - uses: dtolnay/rust-toolchain@1.81
         with:
           profile: minimal
+
       # Run benchmarks
       - name: Run benchmarks - Dataset ${BENCH_NAME} - Branch main - Commit ${{ github.sha }}
         run: |
           cargo xtask bench --api-key "${{ secrets.BENCHMARK_API_KEY }}" --dashboard-url "${{ vars.BENCHMARK_DASHBOARD_URL }}" --reason "Push on `main` [Run #${{ github.run_id }}](https://github.com/meilisearch/meilisearch/actions/runs/${{ github.run_id }})" -- workloads/*.json
.github/workflows/benchmarks-manual.yml (vendored, 8 changed lines)

@@ -4,9 +4,9 @@ on:
   workflow_dispatch:
     inputs:
       dataset_name:
-        description: "The name of the dataset used to benchmark (search_songs, search_wiki, search_geo or indexing)"
+        description: 'The name of the dataset used to benchmark (search_songs, search_wiki, search_geo or indexing)'
         required: false
-        default: "search_songs"
+        default: 'search_songs'

 env:
   BENCH_NAME: ${{ github.event.inputs.dataset_name }}

@@ -18,7 +18,7 @@ jobs:
     timeout-minutes: 4320 # 72h
     steps:
       - uses: actions/checkout@v3
-      - uses: dtolnay/rust-toolchain@1.85
+      - uses: dtolnay/rust-toolchain@1.81
         with:
           profile: minimal

@@ -67,7 +67,7 @@ jobs:
           out_dir: critcmp_results

       # Helper
-      - name: "README: compare with another benchmark"
+      - name: 'README: compare with another benchmark'
         run: |
           echo "${{ steps.file.outputs.basename }}.json has just been pushed."
           echo 'How to compare this benchmark with another one?'
.github/workflows/benchmarks-pr.yml (vendored, 2 changed lines)

@@ -44,7 +44,7 @@ jobs:
             exit 1
           fi

-      - uses: dtolnay/rust-toolchain@1.85
+      - uses: dtolnay/rust-toolchain@1.81
         with:
           profile: minimal

(benchmark workflow file, header not captured)

@@ -16,7 +16,7 @@ jobs:
     timeout-minutes: 4320 # 72h
     steps:
       - uses: actions/checkout@v3
-      - uses: dtolnay/rust-toolchain@1.85
+      - uses: dtolnay/rust-toolchain@1.81
         with:
           profile: minimal

@@ -69,7 +69,7 @@ jobs:
         run: telegraf --config https://eu-central-1-1.aws.cloud2.influxdata.com/api/v2/telegrafs/08b52e34a370b000 --once --debug

       # Helper
-      - name: "README: compare with another benchmark"
+      - name: 'README: compare with another benchmark'
         run: |
           echo "${{ steps.file.outputs.basename }}.json has just been pushed."
           echo 'How to compare this benchmark with another one?'

(benchmark workflow file, header not captured)

@@ -15,7 +15,7 @@ jobs:
     runs-on: benchmarks
     steps:
       - uses: actions/checkout@v3
-      - uses: dtolnay/rust-toolchain@1.85
+      - uses: dtolnay/rust-toolchain@1.81
         with:
           profile: minimal

@@ -68,7 +68,7 @@ jobs:
         run: telegraf --config https://eu-central-1-1.aws.cloud2.influxdata.com/api/v2/telegrafs/08b52e34a370b000 --once --debug

       # Helper
-      - name: "README: compare with another benchmark"
+      - name: 'README: compare with another benchmark'
         run: |
           echo "${{ steps.file.outputs.basename }}.json has just been pushed."
           echo 'How to compare this benchmark with another one?'

(benchmark workflow file, header not captured)

@@ -15,7 +15,7 @@ jobs:
     runs-on: benchmarks
     steps:
       - uses: actions/checkout@v3
-      - uses: dtolnay/rust-toolchain@1.85
+      - uses: dtolnay/rust-toolchain@1.81
         with:
           profile: minimal

@@ -68,7 +68,7 @@ jobs:
         run: telegraf --config https://eu-central-1-1.aws.cloud2.influxdata.com/api/v2/telegrafs/08b52e34a370b000 --once --debug

       # Helper
-      - name: "README: compare with another benchmark"
+      - name: 'README: compare with another benchmark'
         run: |
           echo "${{ steps.file.outputs.basename }}.json has just been pushed."
           echo 'How to compare this benchmark with another one?'

(benchmark workflow file, header not captured)

@@ -15,7 +15,7 @@ jobs:
     runs-on: benchmarks
     steps:
       - uses: actions/checkout@v3
-      - uses: dtolnay/rust-toolchain@1.85
+      - uses: dtolnay/rust-toolchain@1.81
         with:
           profile: minimal

@@ -68,7 +68,7 @@ jobs:
         run: telegraf --config https://eu-central-1-1.aws.cloud2.influxdata.com/api/v2/telegrafs/08b52e34a370b000 --once --debug

       # Helper
-      - name: "README: compare with another benchmark"
+      - name: 'README: compare with another benchmark'
         run: |
           echo "${{ steps.file.outputs.basename }}.json has just been pushed."
           echo 'How to compare this benchmark with another one?'
.github/workflows/check-valid-milestone.yml (vendored, 100 changed lines, file deleted)

@@ -1,100 +0,0 @@
-name: PR Milestone Check
-
-on:
-  pull_request:
-    types: [opened, reopened, edited, synchronize, milestoned, demilestoned]
-    branches:
-      - "main"
-      - "release-v*.*.*"
-
-jobs:
-  check-milestone:
-    name: Check PR Milestone
-    runs-on: ubuntu-latest
-
-    steps:
-      - name: Checkout code
-        uses: actions/checkout@v3
-
-      - name: Validate PR milestone
-        uses: actions/github-script@v7
-        with:
-          github-token: ${{ secrets.GITHUB_TOKEN }}
-          script: |
-            // Get PR number directly from the event payload
-            const prNumber = context.payload.pull_request.number;
-
-            // Get PR details
-            const { data: prData } = await github.rest.pulls.get({
-              owner: 'meilisearch',
-              repo: 'meilisearch',
-              pull_number: prNumber
-            });
-
-            // Get base branch name
-            const baseBranch = prData.base.ref;
-            console.log(`Base branch: ${baseBranch}`);
-
-            // Get PR milestone
-            const prMilestone = prData.milestone;
-            if (!prMilestone) {
-              core.setFailed('PR must have a milestone assigned');
-              return;
-            }
-            console.log(`PR milestone: ${prMilestone.title}`);
-
-            // Validate milestone format: vx.y.z
-            const milestoneRegex = /^v\d+\.\d+\.\d+$/;
-            if (!milestoneRegex.test(prMilestone.title)) {
-              core.setFailed(`Milestone "${prMilestone.title}" does not follow the required format vx.y.z`);
-              return;
-            }
-
-            // For main branch PRs, check if the milestone is the highest one
-            if (baseBranch === 'main') {
-              // Get all milestones
-              const { data: milestones } = await github.rest.issues.listMilestones({
-                owner: 'meilisearch',
-                repo: 'meilisearch',
-                state: 'open',
-                sort: 'due_on',
-                direction: 'desc'
-              });
-
-              // Sort milestones by version number (vx.y.z)
-              const sortedMilestones = milestones
-                .filter(m => milestoneRegex.test(m.title))
-                .sort((a, b) => {
-                  const versionA = a.title.substring(1).split('.').map(Number);
-                  const versionB = b.title.substring(1).split('.').map(Number);
-
-                  // Compare major version
-                  if (versionA[0] !== versionB[0]) return versionB[0] - versionA[0];
-                  // Compare minor version
-                  if (versionA[1] !== versionB[1]) return versionB[1] - versionA[1];
-                  // Compare patch version
-                  return versionB[2] - versionA[2];
-                });
-
-              if (sortedMilestones.length === 0) {
-                core.setFailed('No valid milestones found in the repository. Please create at least one milestone with the format vx.y.z');
-                return;
-              }
-
-              const highestMilestone = sortedMilestones[0];
-              console.log(`Highest milestone: ${highestMilestone.title}`);
-
-              if (prMilestone.title !== highestMilestone.title) {
-                core.setFailed(`PRs targeting the main branch must use the highest milestone (${highestMilestone.title}), but this PR uses ${prMilestone.title}`);
-                return;
-              }
-            } else {
-              // For release branches, the milestone should match the branch version
-              const branchVersion = baseBranch.substring(8); // remove 'release-'
-              if (prMilestone.title !== branchVersion) {
-                core.setFailed(`PRs targeting release branch "${baseBranch}" must use the matching milestone "${branchVersion}", but this PR uses "${prMilestone.title}"`);
-                return;
-              }
-            }
-
-            console.log('PR milestone validation passed!');
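The deleted validation script above orders milestones by comparing major, minor, and patch components numerically. The same ordering, as a standalone Rust sketch (purely illustrative; the workflow itself used the JavaScript shown above, and the version titles below are made up):

```rust
/// Parse a milestone title of the form "vX.Y.Z" into (major, minor, patch).
fn parse_milestone(title: &str) -> Option<(u64, u64, u64)> {
    let mut parts = title.strip_prefix('v')?.split('.');
    let major = parts.next()?.parse().ok()?;
    let minor = parts.next()?.parse().ok()?;
    let patch = parts.next()?.parse().ok()?;
    // Reject titles with extra components, e.g. "v1.2.3.4".
    if parts.next().is_some() {
        return None;
    }
    Some((major, minor, patch))
}

/// Highest milestone among the titles matching the vx.y.z format,
/// mirroring the script's filter + sort + take-first logic.
fn highest_milestone(titles: &[&str]) -> Option<(u64, u64, u64)> {
    // Tuples compare lexicographically: major first, then minor, then patch.
    titles.iter().copied().filter_map(parse_milestone).max()
}

fn main() {
    let titles = ["v1.15.0", "v1.14.2", "not-a-version"];
    assert_eq!(highest_milestone(&titles), Some((1, 15, 0)));
}
```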
.github/workflows/db-change-comments.yml (vendored, 57 changed lines, file deleted)

@@ -1,57 +0,0 @@
-name: Comment when db change labels are added
-
-on:
-  pull_request:
-    types: [labeled]
-
-env:
-  MESSAGE: |
-    ### Hello, I'm a bot 🤖
-
-    You are receiving this message because you declared that this PR make changes to the Meilisearch database.
-    Depending on the nature of the change, additional actions might be required on your part. The following sections detail the additional actions depending on the nature of the change, please copy the relevant section in the description of your PR, and make sure to perform the required actions.
-
-    Thank you for contributing to Meilisearch :heart:
-
-    ## This PR makes forward-compatible changes
-
-    *Forward-compatible changes are changes to the database such that databases created in an older version of Meilisearch are still valid in the new version of Meilisearch. They usually represent additive changes, like adding a new optional attribute or setting.*
-
-    - [ ] Detail the change to the DB format and why they are forward compatible
-    - [ ] Forward-compatibility: A database created before this PR and using the features touched by this PR was able to be opened by a Meilisearch produced by the code of this PR.
-
-
-    ## This PR makes breaking changes
-
-    *Breaking changes are changes to the database such that databases created in an older version of Meilisearch need changes to remain valid in the new version of Meilisearch. This typically happens when the way to store the data changed (change of database, new required key, etc). This can also happen due to breaking changes in the API of an experimental feature. ⚠️ This kind of changes are more difficult to achieve safely, so proceed with caution and test dumpless upgrade right before merging the PR.*
-
-    - [ ] Detail the changes to the DB format,
-      - [ ] which are compatible, and why
-      - [ ] which are not compatible, why, and how they will be fixed up in the upgrade
-    - [ ] /!\ Ensure all the read operations still work!
-      - If the change happened in milli, you may need to check the version of the database before doing any read operation
-      - If the change happened in the index-scheduler, make sure the new code can immediately read the old database
-      - If the change happened in the meilisearch-auth database, reach out to the team; we don't know yet how to handle these changes
-    - [ ] Write the code to go from the old database to the new one
-      - If the change happened in milli, the upgrade function should be written and called [here](https://github.com/meilisearch/meilisearch/blob/3fd86e8d76d7d468b0095d679adb09211ca3b6c0/crates/milli/src/update/upgrade/mod.rs#L24-L47)
-      - If the change happened in the index-scheduler, we've never done it yet, but the right place to do it should be [here](https://github.com/meilisearch/meilisearch/blob/3fd86e8d76d7d468b0095d679adb09211ca3b6c0/crates/index-scheduler/src/scheduler/process_upgrade/mod.rs#L13)
-    - [ ] Write an integration test [here](https://github.com/meilisearch/meilisearch/blob/main/crates/meilisearch/tests/upgrade/mod.rs) ensuring you can read the old database, upgrade to the new database, and read the new database as expected
-
-
-jobs:
-  add-comment:
-    runs-on: ubuntu-latest
-    if: github.event.label.name == 'db change'
-    steps:
-      - name: Add comment
-        uses: actions/github-script@v7
-        with:
-          github-token: ${{ secrets.GITHUB_TOKEN }}
-          script: |
-            const message = process.env.MESSAGE;
-            github.rest.issues.createComment({
-              issue_number: context.issue.number,
-              owner: context.repo.owner,
-              repo: context.repo.repo,
-              body: message
-            })
.github/workflows/db-change-missing.yml (vendored, 28 changed lines, file deleted)

@@ -1,28 +0,0 @@
-name: Check db change labels
-
-on:
-  pull_request:
-    types: [opened, synchronize, reopened, labeled, unlabeled]
-
-env:
-  GH_TOKEN: ${{ secrets.MEILI_BOT_GH_PAT }}
-
-jobs:
-  check-labels:
-    runs-on: ubuntu-latest
-    steps:
-      - name: Checkout code
-        uses: actions/checkout@v3
-      - name: Check db change labels
-        id: check_labels
-        run: |
-          URL=/repos/meilisearch/meilisearch/pulls/${{ github.event.pull_request.number }}/labels
-          echo ${{ github.event.pull_request.number }}
-          echo $URL
-          LABELS=$(gh api -H "Accept: application/vnd.github+json" -H "X-GitHub-Api-Version: 2022-11-28" /repos/meilisearch/meilisearch/issues/${{ github.event.pull_request.number }}/labels -q .[].name)
-          if [[ ! "$LABELS" =~ "db change" && ! "$LABELS" =~ "no db change" ]]; then
-            echo "::error::Pull request must contain either the 'db change' or 'no db change' label."
-            exit 1
-          else
-            echo "The label is set"
-          fi
.github/workflows/flaky-tests.yml (vendored, 36 changed lines)

@@ -9,22 +9,22 @@ jobs:
   flaky:
     runs-on: ubuntu-latest
     container:
-      # Use ubuntu-22.04 to compile with glibc 2.35
-      image: ubuntu:22.04
+      # Use ubuntu-20.04 to compile with glibc 2.28
+      image: ubuntu:20.04
     steps:
       - uses: actions/checkout@v3
       - name: Install needed dependencies
         run: |
           apt-get update && apt-get install -y curl
           apt-get install build-essential -y
-      - uses: dtolnay/rust-toolchain@1.85
+      - uses: dtolnay/rust-toolchain@1.81
       - name: Install cargo-flaky
         run: cargo install cargo-flaky
       - name: Run cargo flaky in the dumps
         run: cd crates/dump; cargo flaky -i 100 --release
       - name: Run cargo flaky in the index-scheduler
         run: cd crates/index-scheduler; cargo flaky -i 100 --release
       - name: Run cargo flaky in the auth
         run: cd crates/meilisearch-auth; cargo flaky -i 100 --release
       - name: Run cargo flaky in meilisearch
         run: cd crates/meilisearch; cargo flaky -i 100 --release
.github/workflows/fuzzer-indexing.yml (vendored, 2 changed lines)

@@ -12,7 +12,7 @@ jobs:
     timeout-minutes: 4320 # 72h
     steps:
       - uses: actions/checkout@v3
-      - uses: dtolnay/rust-toolchain@1.85
+      - uses: dtolnay/rust-toolchain@1.81
         with:
           profile: minimal
.github/workflows/milestone-workflow.yml (vendored, 46 changed lines)

@@ -5,7 +5,6 @@ name: Milestone's workflow
 # For each Milestone created (not opened!), and if the release is NOT a patch release (only the patch changed)
 # - the roadmap issue is created, see https://github.com/meilisearch/engine-team/blob/main/issue-templates/roadmap-issue.md
 # - the changelog issue is created, see https://github.com/meilisearch/engine-team/blob/main/issue-templates/changelog-issue.md
-# - update the ruleset to add the current release version to the list of allowed versions and be able to use the merge queue.

 # For each Milestone closed
 # - the `release_version` label is created

@@ -22,9 +21,10 @@ env:
   GH_TOKEN: ${{ secrets.MEILI_BOT_GH_PAT }}

 jobs:
+
   # -----------------
   # MILESTONE CREATED
   # -----------------

   get-release-version:
     if: github.event.action == 'created'

@@ -148,41 +148,9 @@ jobs:
           --body-file $ISSUE_TEMPLATE \
           --milestone $MILESTONE_VERSION

-  update-ruleset:
-    runs-on: ubuntu-latest
-    if: github.event.action == 'created'
-    steps:
-      - uses: actions/checkout@v3
-      - name: Install jq
-        run: |
-          sudo apt-get update
-          sudo apt-get install -y jq
-      - name: Update ruleset
-        env:
-          # gh api repos/meilisearch/meilisearch/rulesets --jq '.[] | {name: .name, id: .id}'
-          RULESET_ID: 4253297
-          BRANCH_NAME: ${{ github.event.inputs.branch_name }}
-        run: |
-          echo "RULESET_ID: ${{ env.RULESET_ID }}"
-          echo "BRANCH_NAME: ${{ env.BRANCH_NAME }}"
-
-          # Get current ruleset conditions
-          CONDITIONS=$(gh api repos/meilisearch/meilisearch/rulesets/${{ env.RULESET_ID }} --jq '{ conditions: .conditions }')
-
-          # Update the conditions by appending the milestone version
-          UPDATED_CONDITIONS=$(echo $CONDITIONS | jq '.conditions.ref_name.include += ["refs/heads/release-'${{ env.MILESTONE_VERSION }}'"]')
-
-          # Update the ruleset from stdin (-)
-          echo $UPDATED_CONDITIONS |
-          gh api repos/meilisearch/meilisearch/rulesets/${{ env.RULESET_ID }} \
-            --method PUT \
-            -H "Accept: application/vnd.github+json" \
-            -H "X-GitHub-Api-Version: 2022-11-28" \
-            --input -
-
   # ----------------
   # MILESTONE CLOSED
   # ----------------

   create-release-label:
     if: github.event.action == 'closed'
.github/workflows/publish-apt-brew-pkg.yml (vendored, 42 changed lines)

@@ -18,28 +18,28 @@ jobs:
     runs-on: ubuntu-latest
     needs: check-version
     container:
-      # Use ubuntu-22.04 to compile with glibc 2.35
-      image: ubuntu:22.04
+      # Use ubuntu-20.04 to compile with glibc 2.28
+      image: ubuntu:20.04
     steps:
       - name: Install needed dependencies
         run: |
           apt-get update && apt-get install -y curl
           apt-get install build-essential -y
-      - uses: dtolnay/rust-toolchain@1.85
+      - uses: dtolnay/rust-toolchain@1.81
       - name: Install cargo-deb
         run: cargo install cargo-deb
       - uses: actions/checkout@v3
       - name: Build deb package
         run: cargo deb -p meilisearch -o target/debian/meilisearch.deb
       - name: Upload debian pkg to release
         uses: svenstaro/upload-release-action@2.7.0
         with:
           repo_token: ${{ secrets.MEILI_BOT_GH_PAT }}
           file: target/debian/meilisearch.deb
           asset_name: meilisearch.deb
           tag: ${{ github.ref }}
       - name: Upload debian pkg to apt repository
         run: curl -F package=@target/debian/meilisearch.deb https://${{ secrets.GEMFURY_PUSH_TOKEN }}@push.fury.io/meilisearch/

   homebrew:
     name: Bump Homebrew formula
.github/workflows/publish-binaries.yml (vendored, 74 changed lines)

@@ -3,7 +3,7 @@ name: Publish binaries to GitHub release
 on:
   workflow_dispatch:
   schedule:
-    - cron: "0 2 * * *" # Every day at 2:00am
+    - cron: '0 2 * * *' # Every day at 2:00am
   release:
     types: [published]

@@ -37,26 +37,26 @@ jobs:
     runs-on: ubuntu-latest
     needs: check-version
     container:
-      # Use ubuntu-22.04 to compile with glibc 2.35
-      image: ubuntu:22.04
+      # Use ubuntu-20.04 to compile with glibc 2.28
+      image: ubuntu:20.04
     steps:
       - uses: actions/checkout@v3
       - name: Install needed dependencies
         run: |
           apt-get update && apt-get install -y curl
           apt-get install build-essential -y
-      - uses: dtolnay/rust-toolchain@1.85
+      - uses: dtolnay/rust-toolchain@1.81
       - name: Build
         run: cargo build --release --locked
       # No need to upload binaries for dry run (cron)
       - name: Upload binaries to release
         if: github.event_name == 'release'
         uses: svenstaro/upload-release-action@2.7.0
         with:
           repo_token: ${{ secrets.MEILI_BOT_GH_PAT }}
           file: target/release/meilisearch
           asset_name: meilisearch-linux-amd64
           tag: ${{ github.ref }}

   publish-macos-windows:
     name: Publish binary for ${{ matrix.os }}

@@ -74,19 +74,19 @@ jobs:
           artifact_name: meilisearch.exe
           asset_name: meilisearch-windows-amd64.exe
     steps:
       - uses: actions/checkout@v3
-      - uses: dtolnay/rust-toolchain@1.85
+      - uses: dtolnay/rust-toolchain@1.81
       - name: Build
         run: cargo build --release --locked
       # No need to upload binaries for dry run (cron)
       - name: Upload binaries to release
         if: github.event_name == 'release'
         uses: svenstaro/upload-release-action@2.7.0
         with:
           repo_token: ${{ secrets.MEILI_BOT_GH_PAT }}
           file: target/release/${{ matrix.artifact_name }}
           asset_name: ${{ matrix.asset_name }}
           tag: ${{ github.ref }}

   publish-macos-apple-silicon:
     name: Publish binary for macOS silicon

@@ -101,7 +101,7 @@ jobs:
       - name: Checkout repository
         uses: actions/checkout@v3
       - name: Installing Rust toolchain
-        uses: dtolnay/rust-toolchain@1.85
+        uses: dtolnay/rust-toolchain@1.81
         with:
           profile: minimal
           target: ${{ matrix.target }}

@@ -127,8 +127,8 @@ jobs:
     env:
       DEBIAN_FRONTEND: noninteractive
     container:
-      # Use ubuntu-22.04 to compile with glibc 2.35
-      image: ubuntu:22.04
+      # Use ubuntu-20.04 to compile with glibc 2.28
+      image: ubuntu:20.04
     strategy:
       matrix:
         include:

@@ -148,7 +148,7 @@ jobs:
           add-apt-repository "deb [arch=$(dpkg --print-architecture)] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable"
           apt-get update -y && apt-get install -y docker-ce
       - name: Installing Rust toolchain
-        uses: dtolnay/rust-toolchain@1.85
+        uses: dtolnay/rust-toolchain@1.81
         with:
           profile: minimal
           target: ${{ matrix.target }}
.github/workflows/publish-docker-images.yml (vendored, 19 changed lines)

@@ -104,22 +104,3 @@ jobs:
           repository: meilisearch/meilisearch-cloud
           event-type: cloud-docker-build
           client-payload: '{ "meilisearch_version": "${{ github.ref_name }}", "stable": "${{ steps.check-tag-format.outputs.stable }}" }'
-
-      # Send notification to Swarmia to notify of a deployment: https://app.swarmia.com
-      # - name: 'Setup jq'
-      #   uses: dcarbone/install-jq-action
-      # - name: Send deployment to Swarmia
-      #   if: github.event_name == 'push' && success()
-      #   run: |
-      #     JSON_STRING=$( jq --null-input --compact-output \
-      #       --arg version "${{ github.ref_name }}" \
-      #       --arg appName "meilisearch" \
-      #       --arg environment "production" \
-      #       --arg commitSha "${{ github.sha }}" \
-      #       --arg repositoryFullName "${{ github.repository }}" \
-      #       '{"version": $version, "appName": $appName, "environment": $environment, "commitSha": $commitSha, "repositoryFullName": $repositoryFullName}' )
-
-      #     curl -H "Authorization: ${{ secrets.SWARMIA_DEPLOYMENTS_AUTHORIZATION }}" \
-      #       -H "Content-Type: application/json" \
-      #       -d "$JSON_STRING" \
-      #       https://hook.swarmia.com/deployments
.github/workflows/sdks-tests.yml (vendored, 28 changed lines)

@@ -22,7 +22,7 @@ jobs:
     outputs:
       docker-image: ${{ steps.define-image.outputs.docker-image }}
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
       - name: Define the Docker image we need to use
         id: define-image
         run: |

@@ -46,7 +46,7 @@ jobs:
       MEILISEARCH_VERSION: ${{ needs.define-docker-image.outputs.docker-image }}

     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
         with:
           repository: meilisearch/meilisearch-dotnet
       - name: Setup .NET Core

@@ -75,7 +75,7 @@ jobs:
         ports:
           - '7700:7700'
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
         with:
           repository: meilisearch/meilisearch-dart
       - uses: dart-lang/setup-dart@v1

@@ -103,7 +103,7 @@ jobs:
         uses: actions/setup-go@v5
         with:
           go-version: stable
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
        with:
          repository: meilisearch/meilisearch-go
       - name: Get dependencies

@@ -129,7 +129,7 @@ jobs:
         ports:
           - '7700:7700'
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
         with:
           repository: meilisearch/meilisearch-java
       - name: Set up Java

@@ -156,7 +156,7 @@ jobs:
         ports:
           - '7700:7700'
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
         with:
           repository: meilisearch/meilisearch-js
       - name: Setup node

@@ -191,7 +191,7 @@ jobs:
         ports:
           - '7700:7700'
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
         with:
           repository: meilisearch/meilisearch-php
       - name: Install PHP

@@ -220,7 +220,7 @@ jobs:
         ports:
           - '7700:7700'
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
         with:
           repository: meilisearch/meilisearch-python
       - name: Set up Python

@@ -245,7 +245,7 @@ jobs:
         ports:
           - '7700:7700'
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
         with:
           repository: meilisearch/meilisearch-ruby
       - name: Set up Ruby 3

@@ -270,7 +270,7 @@ jobs:
         ports:
           - '7700:7700'
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
         with:
           repository: meilisearch/meilisearch-rust
       - name: Build

@@ -291,7 +291,7 @@ jobs:
         ports:
           - '7700:7700'
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
         with:
           repository: meilisearch/meilisearch-swift
       - name: Run tests

@@ -314,7 +314,7 @@ jobs:
         ports:
           - '7700:7700'
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
         with:
           repository: meilisearch/meilisearch-js-plugins
       - name: Setup node

@@ -345,7 +345,7 @@ jobs:
         ports:
           - '7700:7700'
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
         with:
           repository: meilisearch/meilisearch-rails
       - name: Set up Ruby 3

@@ -369,7 +369,7 @@ jobs:
         ports:
           - '7700:7700'
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
         with:
           repository: meilisearch/meilisearch-symfony
       - name: Install PHP
.github/workflows/test-suite.yml (vendored, 84 changed lines)

@@ -4,9 +4,13 @@ on:
   workflow_dispatch:
   schedule:
     # Everyday at 5:00am
-    - cron: "0 5 * * *"
+    - cron: '0 5 * * *'
   pull_request:
-  merge_group:
+  push:
+    # trying and staging branches are for Bors config
+    branches:
+      - trying
+      - staging

 env:
   CARGO_TERM_COLOR: always

@@ -15,11 +19,11 @@ env:
 jobs:
   test-linux:
-    name: Tests on ubuntu-22.04
+    name: Tests on ubuntu-20.04
     runs-on: ubuntu-latest
     container:
-      # Use ubuntu-22.04 to compile with glibc 2.35
-      image: ubuntu:22.04
+      # Use ubuntu-20.04 to compile with glibc 2.28
+      image: ubuntu:20.04
     steps:
       - uses: actions/checkout@v3
       - name: Install needed dependencies

@@ -27,9 +31,9 @@ jobs:
           apt-get update && apt-get install -y curl
           apt-get install build-essential -y
       - name: Setup test with Rust stable
-        uses: dtolnay/rust-toolchain@1.85
+        uses: dtolnay/rust-toolchain@1.81
       - name: Cache dependencies
-        uses: Swatinem/rust-cache@v2.7.8
+        uses: Swatinem/rust-cache@v2.7.7
       - name: Run cargo check without any default features
         uses: actions-rs/cargo@v1
         with:

@@ -51,8 +55,8 @@ jobs:
     steps:
       - uses: actions/checkout@v3
       - name: Cache dependencies
-        uses: Swatinem/rust-cache@v2.7.8
+        uses: Swatinem/rust-cache@v2.7.7
-      - uses: dtolnay/rust-toolchain@1.85
+      - uses: dtolnay/rust-toolchain@1.81
       - name: Run cargo check without any default features
         uses: actions-rs/cargo@v1
         with:

@@ -68,8 +72,8 @@ jobs:
     name: Tests almost all features
     runs-on: ubuntu-latest
     container:
-      # Use ubuntu-22.04 to compile with glibc 2.35
-      image: ubuntu:22.04
+      # Use ubuntu-20.04 to compile with glibc 2.28
+      image: ubuntu:20.04
     if: github.event_name == 'schedule' || github.event_name == 'workflow_dispatch'
     steps:
       - uses: actions/checkout@v3

@@ -77,51 +81,19 @@ jobs:
         run: |
           apt-get update
           apt-get install --assume-yes build-essential curl
-      - uses: dtolnay/rust-toolchain@1.85
+      - uses: dtolnay/rust-toolchain@1.81
       - name: Run cargo build with almost all features
         run: |
-          cargo build --workspace --locked --release --features "$(cargo xtask list-features --exclude-feature cuda,test-ollama)"
+          cargo build --workspace --locked --release --features "$(cargo xtask list-features --exclude-feature cuda)"
       - name: Run cargo test with almost all features
         run: |
-          cargo test --workspace --locked --release --features "$(cargo xtask list-features --exclude-feature cuda,test-ollama)"
+          cargo test --workspace --locked --release --features "$(cargo xtask list-features --exclude-feature cuda)"

-  ollama-ubuntu:
-    name: Test with Ollama
-    runs-on: ubuntu-latest
-    env:
-      MEILI_TEST_OLLAMA_SERVER: "http://localhost:11434"
-    steps:
-      - uses: actions/checkout@v3
-      - name: Install Ollama
-        run: |
-          curl -fsSL https://ollama.com/install.sh | sudo -E sh
-      - name: Start serving
-        run: |
-          # Run it in the background, there is no way to daemonise at the moment
-          ollama serve &
-
-          # A short pause is required before the HTTP port is opened
-          sleep 5
-
-          # This endpoint blocks until ready
-          time curl -i http://localhost:11434
-
-      - name: Pull nomic-embed-text & all-minilm
-        run: |
-          ollama pull nomic-embed-text
-          ollama pull all-minilm
-
-      - name: Run cargo test
-        uses: actions-rs/cargo@v1
-        with:
-          command: test
-          args: --locked --release --all --features test-ollama ollama
-
   test-disabled-tokenization:
     name: Test disabled tokenization
     runs-on: ubuntu-latest
     container:
-      image: ubuntu:22.04
+      image: ubuntu:20.04
     if: github.event_name == 'schedule' || github.event_name == 'workflow_dispatch'
     steps:
       - uses: actions/checkout@v3

@@ -129,7 +101,7 @@ jobs:
         run: |
           apt-get update
           apt-get install --assume-yes build-essential curl
-      - uses: dtolnay/rust-toolchain@1.85
+      - uses: dtolnay/rust-toolchain@1.81
       - name: Run cargo tree without default features and check lindera is not present
         run: |
           if cargo tree -f '{p} {f}' -e normal --no-default-features | grep -qz lindera; then

@@ -145,17 +117,17 @@ jobs:
     name: Run tests in debug
     runs-on: ubuntu-latest
     container:
-      # Use ubuntu-22.04 to compile with glibc 2.35
-      image: ubuntu:22.04
+      # Use ubuntu-20.04 to compile with glibc 2.28
+      image: ubuntu:20.04
     steps:
       - uses: actions/checkout@v3
       - name: Install needed dependencies
         run: |
           apt-get update && apt-get install -y curl
           apt-get install build-essential -y
-      - uses: dtolnay/rust-toolchain@1.85
+      - uses: dtolnay/rust-toolchain@1.81
       - name: Cache dependencies
-        uses: Swatinem/rust-cache@v2.7.8
+        uses: Swatinem/rust-cache@v2.7.7
       - name: Run tests in debug
         uses: actions-rs/cargo@v1
         with:

@@ -167,12 +139,12 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - uses: actions/checkout@v3
-      - uses: dtolnay/rust-toolchain@1.85
+      - uses: dtolnay/rust-toolchain@1.81
         with:
           profile: minimal
           components: clippy
       - name: Cache dependencies
-        uses: Swatinem/rust-cache@v2.7.8
+        uses: Swatinem/rust-cache@v2.7.7
       - name: Run cargo clippy
         uses: actions-rs/cargo@v1
         with:

@@ -184,14 +156,14 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - uses: actions/checkout@v3
-      - uses: dtolnay/rust-toolchain@1.85
+      - uses: dtolnay/rust-toolchain@1.81
         with:
           profile: minimal
           toolchain: nightly-2024-07-09
           override: true
           components: rustfmt
       - name: Cache dependencies
-        uses: Swatinem/rust-cache@v2.7.8
+        uses: Swatinem/rust-cache@v2.7.7
       - name: Run cargo fmt
         # Since we never ran the `build.rs` script in the benchmark directory we are missing one auto-generated import file.
         # Since we want to trigger (and fail) this action as fast as possible, instead of building the benchmark crate
@ -4,7 +4,7 @@ on:
|
|||||||
workflow_dispatch:
|
workflow_dispatch:
|
||||||
inputs:
|
inputs:
|
||||||
new_version:
|
new_version:
|
||||||
description: "The new version (vX.Y.Z)"
|
description: 'The new version (vX.Y.Z)'
|
||||||
required: true
|
required: true
|
||||||
|
|
||||||
env:
|
env:
|
||||||
@ -18,7 +18,7 @@ jobs:
|
|||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v3
|
- uses: actions/checkout@v3
|
||||||
- uses: dtolnay/rust-toolchain@1.85
|
- uses: dtolnay/rust-toolchain@1.81
|
||||||
with:
|
with:
|
||||||
profile: minimal
|
profile: minimal
|
||||||
- name: Install sd
|
- name: Install sd
|
||||||
@@ -95,11 +95,6 @@ Meilisearch follows the [cargo xtask](https://github.com/matklad/cargo-xtask) wo
 
 Run `cargo xtask --help` from the root of the repository to find out what is available.
 
-#### Update the openAPI file if the API changed
-
-To update the openAPI file in the code, see [sprint_issue.md](https://github.com/meilisearch/meilisearch/blob/main/.github/ISSUE_TEMPLATE/sprint_issue.md#reminders-when-modifying-the-api).
-If you want to update the openAPI file on the [open-api repository](https://github.com/meilisearch/open-api), see [update-openapi-issue.md](https://github.com/meilisearch/engine-team/blob/main/issue-templates/update-openapi-issue.md).
-
 ### Logging
 
 Meilisearch uses [`tracing`](https://lib.rs/crates/tracing) for logging purposes. Tracing logs are structured and can be displayed as JSON to the end user, so prefer passing arguments as fields rather than interpolating them in the message.
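As a short sketch of the guideline quoted above (the function and field names here are made up for illustration, only the `tracing` macros are real):

```rust
use tracing::info;

// Hypothetical indexing callback, for illustration only.
fn on_batch_indexed(index_uid: &str, document_count: usize) {
    // Preferred: structured fields stay machine-readable when the logs
    // are rendered as JSON.
    info!(index_uid = index_uid, document_count = document_count, "indexed a batch");

    // Discouraged: interpolation flattens the data into one opaque string.
    info!("indexed {document_count} documents in index {index_uid}");
}

fn main() {
    on_batch_indexed("movies", 1000);
}
```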
@@ -150,7 +145,7 @@ Some notes on GitHub PRs:
 - The PR title should be accurate and descriptive of the changes.
 - [Convert your PR as a draft](https://help.github.com/en/github/collaborating-with-issues-and-pull-requests/changing-the-stage-of-a-pull-request) if your changes are a work in progress: no one will review it until you pass your PR as ready for review.<br>
   The draft PRs are recommended when you want to show that you are working on something and make your work visible.
-- The branch related to the PR must be **up-to-date with `main`** before merging. Fortunately, this project uses [GitHub Merge Queues](https://github.blog/news-insights/product-news/github-merge-queue-is-generally-available/) to automatically enforce this requirement without the PR author having to rebase manually.
+- The branch related to the PR must be **up-to-date with `main`** before merging. Fortunately, this project uses [Bors](https://github.com/bors-ng/bors-ng) to automatically enforce this requirement without the PR author having to rebase manually.
 
 ## Release Process (for internal team only)
 
@@ -158,7 +153,8 @@ Meilisearch tools follow the [Semantic Versioning Convention](https://semver.org
 
 ### Automation to rebase and Merge the PRs
 
-This project uses GitHub Merge Queues that helps us manage pull requests merging.
+This project integrates a bot that helps us manage pull requests merging.<br>
+_[Read more about this](https://github.com/meilisearch/integration-guides/blob/main/resources/bors.md)._
 
 ### How to Publish a new Release
 
Cargo.lock (generated): 3164 changed lines; file diff suppressed because it is too large.
@@ -22,7 +22,7 @@ members = [
 ]
 
 [workspace.package]
-version = "1.15.2"
+version = "1.13.2"
 authors = [
   "Quentin de Quelen <quentin@dequelen.me>",
   "Clément Renault <clement@meilisearch.com>",
@@ -36,12 +36,6 @@ license = "MIT"
 [profile.release]
 codegen-units = 1
 
-# We now compile heed without the NDEBUG define for better performance.
-# However, we still enable debug assertions for a better detection of
-# disk corruption on the cloud or in OSS.
-[profile.release.package.heed]
-debug-assertions = true
-
 [profile.dev.package.flate2]
 opt-level = 3
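The removed comment explains the intent of `debug-assertions = true`: keep heed's internal sanity checks alive even in release builds. A self-contained sketch of the kind of check that flag re-enables is below; `Page` is hypothetical, not a heed type.

```rust
// Illustrative only: a checksum guard of the sort `debug_assert!` family
// macros express. With the default release profile this compiles away;
// with a per-package `debug-assertions = true` override it stays in,
// catching corrupted pages early.
struct Page {
    stored_checksum: u32,
    computed_checksum: u32,
}

fn validate(page: &Page) {
    debug_assert_eq!(
        page.stored_checksum, page.computed_checksum,
        "page checksum mismatch: possible disk corruption"
    );
}

fn main() {
    validate(&Page { stored_checksum: 7, computed_checksum: 7 });
}
```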
@@ -1,5 +1,5 @@
 # Compile
-FROM rust:1.85-alpine3.20 AS compiler
+FROM rust:1.81.0-alpine3.20 AS compiler
 
 RUN apk add -q --no-cache build-base openssl-dev
 
@@ -20,7 +20,7 @@
 <p align="center">
   <a href="https://deps.rs/repo/github/meilisearch/meilisearch"><img src="https://deps.rs/repo/github/meilisearch/meilisearch/status.svg" alt="Dependency status"></a>
   <a href="https://github.com/meilisearch/meilisearch/blob/main/LICENSE"><img src="https://img.shields.io/badge/license-MIT-informational" alt="License"></a>
-  <a href="https://github.com/meilisearch/meilisearch/queue"><img alt="Merge Queues enabled" src="https://img.shields.io/badge/Merge_Queues-enabled-%2357cf60?logo=github"></a>
+  <a href="https://ms-bors.herokuapp.com/repositories/52"><img src="https://bors.tech/images/badge_small.svg" alt="Bors enabled"></a>
 </p>
 
 <p align="center">⚡ A lightning-fast search engine that fits effortlessly into your apps, websites, and workflow 🔍</p>
 
@@ -41,7 +41,7 @@
 - [**Movies**](https://where2watch.meilisearch.com/?utm_campaign=oss&utm_source=github&utm_medium=organization) — An application to help you find streaming platforms to watch movies using [hybrid search](https://www.meilisearch.com/solutions/hybrid-search?utm_campaign=oss&utm_source=github&utm_medium=meilisearch&utm_content=demos).
 - [**Ecommerce**](https://ecommerce.meilisearch.com/?utm_campaign=oss&utm_source=github&utm_medium=meilisearch&utm_content=demos) — Ecommerce website using disjunctive [facets](https://www.meilisearch.com/docs/learn/fine_tuning_results/faceted_search?utm_campaign=oss&utm_source=github&utm_medium=meilisearch&utm_content=demos), range and rating filtering, and pagination.
 - [**Songs**](https://music.meilisearch.com/?utm_campaign=oss&utm_source=github&utm_medium=meilisearch&utm_content=demos) — Search through 47 million of songs.
 - [**SaaS**](https://saas.meilisearch.com/?utm_campaign=oss&utm_source=github&utm_medium=meilisearch&utm_content=demos) — Search for contacts, deals, and companies in this [multi-tenant](https://www.meilisearch.com/docs/learn/security/multitenancy_tenant_tokens?utm_campaign=oss&utm_source=github&utm_medium=meilisearch&utm_content=demos) CRM application.
 
 See the list of all our example apps in our [demos repository](https://github.com/meilisearch/demos).
 
@@ -99,7 +99,7 @@ If you want to know more about the kind of data we collect and what we use it fo
 
 ## 📫 Get in touch!
 
-Meilisearch is a search engine created by [Meili](https://www.meilisearch.com/careers), a software development company headquartered in France and with team members all over the world. Want to know more about us? [Check out our blog!](https://blog.meilisearch.com/?utm_campaign=oss&utm_source=github&utm_medium=meilisearch&utm_content=contact)
+Meilisearch is a search engine created by [Meili]([https://www.welcometothejungle.com/en/companies/meilisearch](https://www.meilisearch.com/careers)), a software development company headquartered in France and with team members all over the world. Want to know more about us? [Check out our blog!](https://blog.meilisearch.com/?utm_campaign=oss&utm_source=github&utm_medium=meilisearch&utm_content=contact)
 
 🗞 [Subscribe to our newsletter](https://meilisearch.us2.list-manage.com/subscribe?u=27870f7b71c908a8b359599fb&id=79582d828e) if you don't want to miss any updates! We promise we won't clutter your mailbox: we only send one edition every two months.
 
@@ -1501,300 +1501,6 @@
       "title": "Task queue latency",
       "type": "timeseries"
     },
-    {
-      "datasource": {
-        "type": "prometheus",
-        "uid": "${DS_PROMETHEUS}"
-      },
-      "fieldConfig": {
-        "defaults": {
-          "color": {
-            "mode": "palette-classic"
-          },
-          "custom": {
-            "axisBorderShow": false,
-            "axisCenteredZero": false,
-            "axisColorMode": "text",
-            "axisLabel": "",
-            "axisPlacement": "auto",
-            "barAlignment": 0,
-            "drawStyle": "line",
-            "fillOpacity": 15,
-            "gradientMode": "none",
-            "hideFrom": {
-              "legend": false,
-              "tooltip": false,
-              "viz": false
-            },
-            "insertNulls": false,
-            "lineInterpolation": "linear",
-            "lineWidth": 1,
-            "pointSize": 5,
-            "scaleDistribution": {
-              "type": "linear"
-            },
-            "showPoints": "never",
-            "spanNulls": false,
-            "stacking": {
-              "group": "A",
-              "mode": "none"
-            },
-            "thresholdsStyle": {
-              "mode": "off"
-            }
-          },
-          "mappings": [],
-          "thresholds": {
-            "mode": "absolute",
-            "steps": [
-              {
-                "color": "green"
-              },
-              {
-                "color": "red",
-                "value": 80
-              }
-            ]
-          },
-          "unit": "none"
-        },
-        "overrides": []
-      },
-      "gridPos": {
-        "h": 11,
-        "w": 12,
-        "x": 12,
-        "y": 51
-      },
-      "id": 29,
-      "interval": "5s",
-      "options": {
-        "legend": {
-          "calcs": [],
-          "displayMode": "list",
-          "placement": "right",
-          "showLegend": true
-        },
-        "tooltip": {
-          "mode": "single",
-          "sort": "none"
-        }
-      },
-      "pluginVersion": "8.1.4",
-      "targets": [
-        {
-          "datasource": {
-            "type": "prometheus",
-            "uid": "${DS_PROMETHEUS}"
-          },
-          "editorMode": "builder",
-          "exemplar": true,
-          "expr": "meilisearch_task_queue_used_size{instance=\"$instance\", job=\"$job\"}",
-          "interval": "",
-          "legendFormat": "{{value}} ",
-          "range": true,
-          "refId": "A"
-        }
-      ],
-      "title": "Task queue used size in bytes",
-      "type": "timeseries"
-    },
-    {
-      "datasource": {
-        "type": "prometheus",
-        "uid": "${DS_PROMETHEUS}"
-      },
-      "fieldConfig": {
-        "defaults": {
-          "color": {
-            "mode": "palette-classic"
-          },
-          "custom": {
-            "axisBorderShow": false,
-            "axisCenteredZero": false,
-            "axisColorMode": "text",
-            "axisLabel": "",
-            "axisPlacement": "auto",
-            "barAlignment": 0,
-            "drawStyle": "line",
-            "fillOpacity": 15,
-            "gradientMode": "none",
-            "hideFrom": {
-              "legend": false,
-              "tooltip": false,
-              "viz": false
-            },
-            "insertNulls": false,
-            "lineInterpolation": "linear",
-            "lineWidth": 1,
-            "pointSize": 5,
-            "scaleDistribution": {
-              "type": "linear"
-            },
-            "showPoints": "never",
-            "spanNulls": false,
-            "stacking": {
-              "group": "A",
-              "mode": "none"
-            },
-            "thresholdsStyle": {
-              "mode": "off"
-            }
-          },
-          "mappings": [],
-          "thresholds": {
-            "mode": "absolute",
-            "steps": [
-              {
-                "color": "green"
-              },
-              {
-                "color": "red",
-                "value": 80
-              }
-            ]
-          },
-          "unit": "none"
-        },
-        "overrides": []
-      },
-      "gridPos": {
-        "h": 11,
-        "w": 12,
-        "x": 12,
-        "y": 51
-      },
-      "id": 29,
-      "interval": "5s",
-      "options": {
-        "legend": {
-          "calcs": [],
-          "displayMode": "list",
-          "placement": "right",
-          "showLegend": true
-        },
-        "tooltip": {
-          "mode": "single",
-          "sort": "none"
-        }
-      },
-      "pluginVersion": "8.1.4",
-      "targets": [
-        {
-          "datasource": {
-            "type": "prometheus",
-            "uid": "${DS_PROMETHEUS}"
-          },
-          "editorMode": "builder",
-          "exemplar": true,
-          "expr": "meilisearch_task_queue_size_until_stop_registering{instance=\"$instance\", job=\"$job\"}",
-          "interval": "",
-          "legendFormat": "{{value}} ",
-          "range": true,
-          "refId": "A"
-        }
-      ],
-      "title": "Task queue available size until it stop receiving tasks.",
-      "type": "timeseries"
-    },
-    {
-      "datasource": {
-        "type": "prometheus",
-        "uid": "${DS_PROMETHEUS}"
-      },
-      "fieldConfig": {
-        "defaults": {
-          "color": {
-            "mode": "palette-classic"
-          },
-          "custom": {
-            "axisBorderShow": false,
-            "axisCenteredZero": false,
-            "axisColorMode": "text",
-            "axisLabel": "",
-            "axisPlacement": "auto",
-            "barAlignment": 0,
-            "drawStyle": "line",
-            "fillOpacity": 15,
-            "gradientMode": "none",
-            "hideFrom": {
-              "legend": false,
-              "tooltip": false,
-              "viz": false
-            },
-            "insertNulls": false,
-            "lineInterpolation": "linear",
-            "lineWidth": 1,
-            "pointSize": 5,
-            "scaleDistribution": {
-              "type": "linear"
-            },
-            "showPoints": "never",
-            "spanNulls": false,
-            "stacking": {
-              "group": "A",
-              "mode": "none"
-            },
-            "thresholdsStyle": {
-              "mode": "off"
-            }
-          },
-          "mappings": [],
-          "thresholds": {
-            "mode": "absolute",
-            "steps": [
-              {
-                "color": "green"
-              },
-              {
-                "color": "red",
-                "value": 80
-              }
-            ]
-          },
-          "unit": "none"
-        },
-        "overrides": []
-      },
-      "gridPos": {
-        "h": 11,
-        "w": 12,
-        "x": 12,
-        "y": 51
-      },
-      "id": 29,
-      "interval": "5s",
-      "options": {
-        "legend": {
-          "calcs": [],
-          "displayMode": "list",
-          "placement": "right",
-          "showLegend": true
-        },
-        "tooltip": {
-          "mode": "single",
-          "sort": "none"
-        }
-      },
-      "pluginVersion": "8.1.4",
-      "targets": [
-        {
-          "datasource": {
-            "type": "prometheus",
-            "uid": "${DS_PROMETHEUS}"
-          },
-          "editorMode": "builder",
-          "exemplar": true,
-          "expr": "meilisearch_task_queue_max_size{instance=\"$instance\", job=\"$job\"}",
-          "interval": "",
-          "legendFormat": "{{value}} ",
-          "range": true,
-          "refId": "A"
-        }
-      ],
-      "title": "Task queue maximum possible size",
-      "type": "stat"
-    },
     {
       "collapsed": true,
       "datasource": {
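All three removed panels graph plain gauges that the engine exports for Prometheus scraping. As a sketch (not Meilisearch's actual implementation), this is how such a gauge is typically registered and updated with the `prometheus` crate, reusing the metric name the panels query:

```rust
use prometheus::{IntGauge, Registry};

fn main() -> prometheus::Result<()> {
    let registry = Registry::new();
    // Same metric name as the first removed panel's PromQL expression.
    let used_size = IntGauge::new(
        "meilisearch_task_queue_used_size",
        "Used size of the task queue in bytes",
    )?;
    registry.register(Box::new(used_size.clone()))?;

    // The scheduler would refresh this whenever it measures the queue.
    used_size.set(1_024 * 1_024);
    Ok(())
}
```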
Binary file not shown (before: 578 KiB).
bors.toml (new file, 11 lines)
@@ -0,0 +1,11 @@
+status = [
+    'Tests on ubuntu-20.04',
+    'Tests on macos-13',
+    'Tests on windows-2022',
+    'Run Clippy',
+    'Run Rustfmt',
+    'Run tests in debug',
+]
+pr_status = ['Milestone Check']
+# 3 hours timeout
+timeout-sec = 10800
@ -31,7 +31,7 @@ anyhow = "1.0.95"
|
|||||||
bytes = "1.9.0"
|
bytes = "1.9.0"
|
||||||
convert_case = "0.6.0"
|
convert_case = "0.6.0"
|
||||||
flate2 = "1.0.35"
|
flate2 = "1.0.35"
|
||||||
reqwest = { version = "0.12.15", features = ["blocking", "rustls-tls"], default-features = false }
|
reqwest = { version = "0.12.12", features = ["blocking", "rustls-tls"], default-features = false }
|
||||||
|
|
||||||
[features]
|
[features]
|
||||||
default = ["milli/all-tokenizations"]
|
default = ["milli/all-tokenizations"]
|
||||||
|
@@ -10,9 +10,9 @@ use milli::documents::PrimaryKey;
 use milli::heed::{EnvOpenOptions, RwTxn};
 use milli::progress::Progress;
 use milli::update::new::indexer;
-use milli::update::{IndexerConfig, Settings};
+use milli::update::{IndexDocumentsMethod, IndexerConfig, Settings};
 use milli::vector::EmbeddingConfigs;
-use milli::{FilterableAttributesRule, Index};
+use milli::Index;
 use rand::seq::SliceRandom;
 use rand_chacha::rand_core::SeedableRng;
 use roaring::RoaringBitmap;
@@ -35,8 +35,7 @@ fn setup_dir(path: impl AsRef<Path>) {
 fn setup_index() -> Index {
     let path = "benches.mmdb";
     setup_dir(path);
-    let options = EnvOpenOptions::new();
-    let mut options = options.read_txn_without_tls();
+    let mut options = EnvOpenOptions::new();
     options.map_size(100 * 1024 * 1024 * 1024); // 100 GB
     options.max_readers(100);
     Index::new(options, path, true).unwrap()
@ -58,8 +57,7 @@ fn setup_settings<'t>(
|
|||||||
let searchable_fields = searchable_fields.iter().map(|s| s.to_string()).collect();
|
let searchable_fields = searchable_fields.iter().map(|s| s.to_string()).collect();
|
||||||
builder.set_searchable_fields(searchable_fields);
|
builder.set_searchable_fields(searchable_fields);
|
||||||
|
|
||||||
let filterable_fields =
|
let filterable_fields = filterable_fields.iter().map(|s| s.to_string()).collect();
|
||||||
filterable_fields.iter().map(|s| FilterableAttributesRule::Field(s.to_string())).collect();
|
|
||||||
builder.set_filterable_fields(filterable_fields);
|
builder.set_filterable_fields(filterable_fields);
|
||||||
|
|
||||||
let sortable_fields = sortable_fields.iter().map(|s| s.to_string()).collect();
|
let sortable_fields = sortable_fields.iter().map(|s| s.to_string()).collect();
|
||||||
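This same migration recurs in several files below: on the `latest` side, `set_filterable_fields` takes `FilterableAttributesRule` values, while the older branch passes bare strings. A self-contained toy mirroring the shape of that change is below; the enum is a stand-in for illustration, not milli's actual definition.

```rust
// Stand-in mirroring only the `Field` case visible in these diffs.
enum FilterableAttributesRule {
    Field(String),
}

fn main() {
    let fields = ["_geo", "population", "elevation"];

    // Older branch: bare attribute names.
    let plain: Vec<String> = fields.iter().map(|s| s.to_string()).collect();

    // latest: each attribute becomes a rule, leaving room for richer
    // per-attribute configuration than a bare name can carry.
    let rules: Vec<FilterableAttributesRule> = fields
        .iter()
        .map(|s| FilterableAttributesRule::Field(s.to_string()))
        .collect();

    assert_eq!(plain.len(), rules.len());
}
```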
@ -140,9 +138,10 @@ fn indexing_songs_default(c: &mut Criterion) {
|
|||||||
let db_fields_ids_map = index.fields_ids_map(&rtxn).unwrap();
|
let db_fields_ids_map = index.fields_ids_map(&rtxn).unwrap();
|
||||||
let mut new_fields_ids_map = db_fields_ids_map.clone();
|
let mut new_fields_ids_map = db_fields_ids_map.clone();
|
||||||
|
|
||||||
let mut indexer = indexer::DocumentOperation::new();
|
let mut indexer =
|
||||||
|
indexer::DocumentOperation::new(IndexDocumentsMethod::ReplaceDocuments);
|
||||||
let documents = utils::documents_from(datasets_paths::SMOL_SONGS, "csv");
|
let documents = utils::documents_from(datasets_paths::SMOL_SONGS, "csv");
|
||||||
indexer.replace_documents(&documents).unwrap();
|
indexer.add_documents(&documents).unwrap();
|
||||||
|
|
||||||
let indexer_alloc = Bump::new();
|
let indexer_alloc = Bump::new();
|
||||||
let (document_changes, _operation_stats, primary_key) = indexer
|
let (document_changes, _operation_stats, primary_key) = indexer
|
||||||
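The same two-line change repeats through the rest of this benchmark file, so it is worth spelling out once: the older branch fixes the replace-vs-update semantics when the `DocumentOperation` is constructed, while `latest` names the semantics per call (`replace_documents`). A toy model of the two shapes, with no milli types involved:

```rust
// Toy stand-ins, not milli's real types: they only mirror where the
// replace-vs-update decision lives in each API shape.
#[allow(dead_code)]
enum Method {
    Replace,
    Update,
}

// Older shape: the method is fixed at construction time.
#[allow(dead_code)]
struct OperationOld {
    method: Method,
}

impl OperationOld {
    fn new(method: Method) -> Self {
        Self { method }
    }
    fn add_documents(&mut self, _docs: &[u8]) {
        // Behaviour depends on self.method.
    }
}

// Newer shape: each call names its own semantics.
#[derive(Default)]
struct OperationNew;

impl OperationNew {
    fn replace_documents(&mut self, _docs: &[u8]) {}
    fn update_documents(&mut self, _docs: &[u8]) {}
}

fn main() {
    let mut old = OperationOld::new(Method::Replace);
    old.add_documents(b"{}");

    let mut new = OperationNew::default();
    new.replace_documents(b"{}");
    new.update_documents(b"{}");
}
```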
@ -206,9 +205,10 @@ fn reindexing_songs_default(c: &mut Criterion) {
|
|||||||
let db_fields_ids_map = index.fields_ids_map(&rtxn).unwrap();
|
let db_fields_ids_map = index.fields_ids_map(&rtxn).unwrap();
|
||||||
let mut new_fields_ids_map = db_fields_ids_map.clone();
|
let mut new_fields_ids_map = db_fields_ids_map.clone();
|
||||||
|
|
||||||
let mut indexer = indexer::DocumentOperation::new();
|
let mut indexer =
|
||||||
|
indexer::DocumentOperation::new(IndexDocumentsMethod::ReplaceDocuments);
|
||||||
let documents = utils::documents_from(datasets_paths::SMOL_SONGS, "csv");
|
let documents = utils::documents_from(datasets_paths::SMOL_SONGS, "csv");
|
||||||
indexer.replace_documents(&documents).unwrap();
|
indexer.add_documents(&documents).unwrap();
|
||||||
|
|
||||||
let indexer_alloc = Bump::new();
|
let indexer_alloc = Bump::new();
|
||||||
let (document_changes, _operation_stats, primary_key) = indexer
|
let (document_changes, _operation_stats, primary_key) = indexer
|
||||||
@ -250,9 +250,10 @@ fn reindexing_songs_default(c: &mut Criterion) {
|
|||||||
let db_fields_ids_map = index.fields_ids_map(&rtxn).unwrap();
|
let db_fields_ids_map = index.fields_ids_map(&rtxn).unwrap();
|
||||||
let mut new_fields_ids_map = db_fields_ids_map.clone();
|
let mut new_fields_ids_map = db_fields_ids_map.clone();
|
||||||
|
|
||||||
let mut indexer = indexer::DocumentOperation::new();
|
let mut indexer =
|
||||||
|
indexer::DocumentOperation::new(IndexDocumentsMethod::ReplaceDocuments);
|
||||||
let documents = utils::documents_from(datasets_paths::SMOL_SONGS, "csv");
|
let documents = utils::documents_from(datasets_paths::SMOL_SONGS, "csv");
|
||||||
indexer.replace_documents(&documents).unwrap();
|
indexer.add_documents(&documents).unwrap();
|
||||||
|
|
||||||
let indexer_alloc = Bump::new();
|
let indexer_alloc = Bump::new();
|
||||||
let (document_changes, _operation_stats, primary_key) = indexer
|
let (document_changes, _operation_stats, primary_key) = indexer
|
||||||
@ -318,9 +319,10 @@ fn deleting_songs_in_batches_default(c: &mut Criterion) {
|
|||||||
let db_fields_ids_map = index.fields_ids_map(&rtxn).unwrap();
|
let db_fields_ids_map = index.fields_ids_map(&rtxn).unwrap();
|
||||||
let mut new_fields_ids_map = db_fields_ids_map.clone();
|
let mut new_fields_ids_map = db_fields_ids_map.clone();
|
||||||
|
|
||||||
let mut indexer = indexer::DocumentOperation::new();
|
let mut indexer =
|
||||||
|
indexer::DocumentOperation::new(IndexDocumentsMethod::ReplaceDocuments);
|
||||||
let documents = utils::documents_from(datasets_paths::SMOL_SONGS, "csv");
|
let documents = utils::documents_from(datasets_paths::SMOL_SONGS, "csv");
|
||||||
indexer.replace_documents(&documents).unwrap();
|
indexer.add_documents(&documents).unwrap();
|
||||||
|
|
||||||
let indexer_alloc = Bump::new();
|
let indexer_alloc = Bump::new();
|
||||||
let (document_changes, _operation_stats, primary_key) = indexer
|
let (document_changes, _operation_stats, primary_key) = indexer
|
||||||
@ -394,9 +396,10 @@ fn indexing_songs_in_three_batches_default(c: &mut Criterion) {
|
|||||||
let db_fields_ids_map = index.fields_ids_map(&rtxn).unwrap();
|
let db_fields_ids_map = index.fields_ids_map(&rtxn).unwrap();
|
||||||
let mut new_fields_ids_map = db_fields_ids_map.clone();
|
let mut new_fields_ids_map = db_fields_ids_map.clone();
|
||||||
|
|
||||||
let mut indexer = indexer::DocumentOperation::new();
|
let mut indexer =
|
||||||
|
indexer::DocumentOperation::new(IndexDocumentsMethod::ReplaceDocuments);
|
||||||
let documents = utils::documents_from(datasets_paths::SMOL_SONGS_1_2, "csv");
|
let documents = utils::documents_from(datasets_paths::SMOL_SONGS_1_2, "csv");
|
||||||
indexer.replace_documents(&documents).unwrap();
|
indexer.add_documents(&documents).unwrap();
|
||||||
|
|
||||||
let indexer_alloc = Bump::new();
|
let indexer_alloc = Bump::new();
|
||||||
let (document_changes, _operation_stats, primary_key) = indexer
|
let (document_changes, _operation_stats, primary_key) = indexer
|
||||||
@ -438,9 +441,10 @@ fn indexing_songs_in_three_batches_default(c: &mut Criterion) {
|
|||||||
let db_fields_ids_map = index.fields_ids_map(&rtxn).unwrap();
|
let db_fields_ids_map = index.fields_ids_map(&rtxn).unwrap();
|
||||||
let mut new_fields_ids_map = db_fields_ids_map.clone();
|
let mut new_fields_ids_map = db_fields_ids_map.clone();
|
||||||
|
|
||||||
let mut indexer = indexer::DocumentOperation::new();
|
let mut indexer =
|
||||||
|
indexer::DocumentOperation::new(IndexDocumentsMethod::ReplaceDocuments);
|
||||||
let documents = utils::documents_from(datasets_paths::SMOL_SONGS_3_4, "csv");
|
let documents = utils::documents_from(datasets_paths::SMOL_SONGS_3_4, "csv");
|
||||||
indexer.replace_documents(&documents).unwrap();
|
indexer.add_documents(&documents).unwrap();
|
||||||
|
|
||||||
let indexer_alloc = Bump::new();
|
let indexer_alloc = Bump::new();
|
||||||
let (document_changes, _operation_stats, primary_key) = indexer
|
let (document_changes, _operation_stats, primary_key) = indexer
|
||||||
@ -478,9 +482,10 @@ fn indexing_songs_in_three_batches_default(c: &mut Criterion) {
|
|||||||
let db_fields_ids_map = index.fields_ids_map(&rtxn).unwrap();
|
let db_fields_ids_map = index.fields_ids_map(&rtxn).unwrap();
|
||||||
let mut new_fields_ids_map = db_fields_ids_map.clone();
|
let mut new_fields_ids_map = db_fields_ids_map.clone();
|
||||||
|
|
||||||
let mut indexer = indexer::DocumentOperation::new();
|
let mut indexer =
|
||||||
|
indexer::DocumentOperation::new(IndexDocumentsMethod::ReplaceDocuments);
|
||||||
let documents = utils::documents_from(datasets_paths::SMOL_SONGS_4_4, "csv");
|
let documents = utils::documents_from(datasets_paths::SMOL_SONGS_4_4, "csv");
|
||||||
indexer.replace_documents(&documents).unwrap();
|
indexer.add_documents(&documents).unwrap();
|
||||||
|
|
||||||
let indexer_alloc = Bump::new();
|
let indexer_alloc = Bump::new();
|
||||||
let (document_changes, _operation_stats, primary_key) = indexer
|
let (document_changes, _operation_stats, primary_key) = indexer
|
||||||
@ -544,10 +549,11 @@ fn indexing_songs_without_faceted_numbers(c: &mut Criterion) {
|
|||||||
let db_fields_ids_map = index.fields_ids_map(&rtxn).unwrap();
|
let db_fields_ids_map = index.fields_ids_map(&rtxn).unwrap();
|
||||||
let mut new_fields_ids_map = db_fields_ids_map.clone();
|
let mut new_fields_ids_map = db_fields_ids_map.clone();
|
||||||
|
|
||||||
let mut indexer = indexer::DocumentOperation::new();
|
let mut indexer =
|
||||||
|
indexer::DocumentOperation::new(IndexDocumentsMethod::ReplaceDocuments);
|
||||||
let documents = utils::documents_from(datasets_paths::SMOL_SONGS, "csv");
|
let documents = utils::documents_from(datasets_paths::SMOL_SONGS, "csv");
|
||||||
|
|
||||||
indexer.replace_documents(&documents).unwrap();
|
indexer.add_documents(&documents).unwrap();
|
||||||
|
|
||||||
let indexer_alloc = Bump::new();
|
let indexer_alloc = Bump::new();
|
||||||
let (document_changes, _operation_stats, primary_key) = indexer
|
let (document_changes, _operation_stats, primary_key) = indexer
|
||||||
@ -611,9 +617,10 @@ fn indexing_songs_without_faceted_fields(c: &mut Criterion) {
|
|||||||
let db_fields_ids_map = index.fields_ids_map(&rtxn).unwrap();
|
let db_fields_ids_map = index.fields_ids_map(&rtxn).unwrap();
|
||||||
let mut new_fields_ids_map = db_fields_ids_map.clone();
|
let mut new_fields_ids_map = db_fields_ids_map.clone();
|
||||||
|
|
||||||
let mut indexer = indexer::DocumentOperation::new();
|
let mut indexer =
|
||||||
|
indexer::DocumentOperation::new(IndexDocumentsMethod::ReplaceDocuments);
|
||||||
let documents = utils::documents_from(datasets_paths::SMOL_SONGS, "csv");
|
let documents = utils::documents_from(datasets_paths::SMOL_SONGS, "csv");
|
||||||
indexer.replace_documents(&documents).unwrap();
|
indexer.add_documents(&documents).unwrap();
|
||||||
|
|
||||||
let indexer_alloc = Bump::new();
|
let indexer_alloc = Bump::new();
|
||||||
let (document_changes, _operation_stats, primary_key) = indexer
|
let (document_changes, _operation_stats, primary_key) = indexer
|
||||||
@ -677,9 +684,10 @@ fn indexing_wiki(c: &mut Criterion) {
|
|||||||
let db_fields_ids_map = index.fields_ids_map(&rtxn).unwrap();
|
let db_fields_ids_map = index.fields_ids_map(&rtxn).unwrap();
|
||||||
let mut new_fields_ids_map = db_fields_ids_map.clone();
|
let mut new_fields_ids_map = db_fields_ids_map.clone();
|
||||||
|
|
||||||
let mut indexer = indexer::DocumentOperation::new();
|
let mut indexer =
|
||||||
|
indexer::DocumentOperation::new(IndexDocumentsMethod::ReplaceDocuments);
|
||||||
let documents = utils::documents_from(datasets_paths::SMOL_WIKI_ARTICLES, "csv");
|
let documents = utils::documents_from(datasets_paths::SMOL_WIKI_ARTICLES, "csv");
|
||||||
indexer.replace_documents(&documents).unwrap();
|
indexer.add_documents(&documents).unwrap();
|
||||||
|
|
||||||
let indexer_alloc = Bump::new();
|
let indexer_alloc = Bump::new();
|
||||||
let (document_changes, _operation_stats, primary_key) = indexer
|
let (document_changes, _operation_stats, primary_key) = indexer
|
||||||
@ -742,9 +750,10 @@ fn reindexing_wiki(c: &mut Criterion) {
|
|||||||
let db_fields_ids_map = index.fields_ids_map(&rtxn).unwrap();
|
let db_fields_ids_map = index.fields_ids_map(&rtxn).unwrap();
|
||||||
let mut new_fields_ids_map = db_fields_ids_map.clone();
|
let mut new_fields_ids_map = db_fields_ids_map.clone();
|
||||||
|
|
||||||
let mut indexer = indexer::DocumentOperation::new();
|
let mut indexer =
|
||||||
|
indexer::DocumentOperation::new(IndexDocumentsMethod::ReplaceDocuments);
|
||||||
let documents = utils::documents_from(datasets_paths::SMOL_WIKI_ARTICLES, "csv");
|
let documents = utils::documents_from(datasets_paths::SMOL_WIKI_ARTICLES, "csv");
|
||||||
indexer.replace_documents(&documents).unwrap();
|
indexer.add_documents(&documents).unwrap();
|
||||||
|
|
||||||
let indexer_alloc = Bump::new();
|
let indexer_alloc = Bump::new();
|
||||||
let (document_changes, _operation_stats, primary_key) = indexer
|
let (document_changes, _operation_stats, primary_key) = indexer
|
||||||
@ -786,9 +795,10 @@ fn reindexing_wiki(c: &mut Criterion) {
|
|||||||
let db_fields_ids_map = index.fields_ids_map(&rtxn).unwrap();
|
let db_fields_ids_map = index.fields_ids_map(&rtxn).unwrap();
|
||||||
let mut new_fields_ids_map = db_fields_ids_map.clone();
|
let mut new_fields_ids_map = db_fields_ids_map.clone();
|
||||||
|
|
||||||
let mut indexer = indexer::DocumentOperation::new();
|
let mut indexer =
|
||||||
|
indexer::DocumentOperation::new(IndexDocumentsMethod::ReplaceDocuments);
|
||||||
let documents = utils::documents_from(datasets_paths::SMOL_WIKI_ARTICLES, "csv");
|
let documents = utils::documents_from(datasets_paths::SMOL_WIKI_ARTICLES, "csv");
|
||||||
indexer.replace_documents(&documents).unwrap();
|
indexer.add_documents(&documents).unwrap();
|
||||||
|
|
||||||
let indexer_alloc = Bump::new();
|
let indexer_alloc = Bump::new();
|
||||||
let (document_changes, _operation_stats, primary_key) = indexer
|
let (document_changes, _operation_stats, primary_key) = indexer
|
||||||
@ -853,9 +863,10 @@ fn deleting_wiki_in_batches_default(c: &mut Criterion) {
|
|||||||
let db_fields_ids_map = index.fields_ids_map(&rtxn).unwrap();
|
let db_fields_ids_map = index.fields_ids_map(&rtxn).unwrap();
|
||||||
let mut new_fields_ids_map = db_fields_ids_map.clone();
|
let mut new_fields_ids_map = db_fields_ids_map.clone();
|
||||||
|
|
||||||
let mut indexer = indexer::DocumentOperation::new();
|
let mut indexer =
|
||||||
|
indexer::DocumentOperation::new(IndexDocumentsMethod::ReplaceDocuments);
|
||||||
let documents = utils::documents_from(datasets_paths::SMOL_WIKI_ARTICLES, "csv");
|
let documents = utils::documents_from(datasets_paths::SMOL_WIKI_ARTICLES, "csv");
|
||||||
indexer.replace_documents(&documents).unwrap();
|
indexer.add_documents(&documents).unwrap();
|
||||||
|
|
||||||
let indexer_alloc = Bump::new();
|
let indexer_alloc = Bump::new();
|
||||||
let (document_changes, _operation_stats, primary_key) = indexer
|
let (document_changes, _operation_stats, primary_key) = indexer
|
||||||
@ -928,10 +939,11 @@ fn indexing_wiki_in_three_batches(c: &mut Criterion) {
|
|||||||
let db_fields_ids_map = index.fields_ids_map(&rtxn).unwrap();
|
let db_fields_ids_map = index.fields_ids_map(&rtxn).unwrap();
|
||||||
let mut new_fields_ids_map = db_fields_ids_map.clone();
|
let mut new_fields_ids_map = db_fields_ids_map.clone();
|
||||||
|
|
||||||
let mut indexer = indexer::DocumentOperation::new();
|
let mut indexer =
|
||||||
|
indexer::DocumentOperation::new(IndexDocumentsMethod::ReplaceDocuments);
|
||||||
let documents =
|
let documents =
|
||||||
utils::documents_from(datasets_paths::SMOL_WIKI_ARTICLES_1_2, "csv");
|
utils::documents_from(datasets_paths::SMOL_WIKI_ARTICLES_1_2, "csv");
|
||||||
indexer.replace_documents(&documents).unwrap();
|
indexer.add_documents(&documents).unwrap();
|
||||||
|
|
||||||
let indexer_alloc = Bump::new();
|
let indexer_alloc = Bump::new();
|
||||||
let (document_changes, _operation_stats, primary_key) = indexer
|
let (document_changes, _operation_stats, primary_key) = indexer
|
||||||
@ -973,10 +985,11 @@ fn indexing_wiki_in_three_batches(c: &mut Criterion) {
|
|||||||
let db_fields_ids_map = index.fields_ids_map(&rtxn).unwrap();
|
let db_fields_ids_map = index.fields_ids_map(&rtxn).unwrap();
|
||||||
let mut new_fields_ids_map = db_fields_ids_map.clone();
|
let mut new_fields_ids_map = db_fields_ids_map.clone();
|
||||||
|
|
||||||
let mut indexer = indexer::DocumentOperation::new();
|
let mut indexer =
|
||||||
|
indexer::DocumentOperation::new(IndexDocumentsMethod::ReplaceDocuments);
|
||||||
let documents =
|
let documents =
|
||||||
utils::documents_from(datasets_paths::SMOL_WIKI_ARTICLES_3_4, "csv");
|
utils::documents_from(datasets_paths::SMOL_WIKI_ARTICLES_3_4, "csv");
|
||||||
indexer.replace_documents(&documents).unwrap();
|
indexer.add_documents(&documents).unwrap();
|
||||||
|
|
||||||
let indexer_alloc = Bump::new();
|
let indexer_alloc = Bump::new();
|
||||||
let (document_changes, _operation_stats, primary_key) = indexer
|
let (document_changes, _operation_stats, primary_key) = indexer
|
||||||
@ -1014,10 +1027,11 @@ fn indexing_wiki_in_three_batches(c: &mut Criterion) {
|
|||||||
let db_fields_ids_map = index.fields_ids_map(&rtxn).unwrap();
|
let db_fields_ids_map = index.fields_ids_map(&rtxn).unwrap();
|
||||||
let mut new_fields_ids_map = db_fields_ids_map.clone();
|
let mut new_fields_ids_map = db_fields_ids_map.clone();
|
||||||
|
|
||||||
let mut indexer = indexer::DocumentOperation::new();
|
let mut indexer =
|
||||||
|
indexer::DocumentOperation::new(IndexDocumentsMethod::ReplaceDocuments);
|
||||||
let documents =
|
let documents =
|
||||||
utils::documents_from(datasets_paths::SMOL_WIKI_ARTICLES_4_4, "csv");
|
utils::documents_from(datasets_paths::SMOL_WIKI_ARTICLES_4_4, "csv");
|
||||||
indexer.replace_documents(&documents).unwrap();
|
indexer.add_documents(&documents).unwrap();
|
||||||
|
|
||||||
let indexer_alloc = Bump::new();
|
let indexer_alloc = Bump::new();
|
||||||
let (document_changes, _operation_stats, primary_key) = indexer
|
let (document_changes, _operation_stats, primary_key) = indexer
|
||||||
@ -1081,9 +1095,10 @@ fn indexing_movies_default(c: &mut Criterion) {
|
|||||||
let db_fields_ids_map = index.fields_ids_map(&rtxn).unwrap();
|
let db_fields_ids_map = index.fields_ids_map(&rtxn).unwrap();
|
||||||
let mut new_fields_ids_map = db_fields_ids_map.clone();
|
let mut new_fields_ids_map = db_fields_ids_map.clone();
|
||||||
|
|
||||||
let mut indexer = indexer::DocumentOperation::new();
|
let mut indexer =
|
||||||
|
indexer::DocumentOperation::new(IndexDocumentsMethod::ReplaceDocuments);
|
||||||
let documents = utils::documents_from(datasets_paths::MOVIES, "json");
|
let documents = utils::documents_from(datasets_paths::MOVIES, "json");
|
||||||
indexer.replace_documents(&documents).unwrap();
|
indexer.add_documents(&documents).unwrap();
|
||||||
|
|
||||||
let indexer_alloc = Bump::new();
|
let indexer_alloc = Bump::new();
|
||||||
let (document_changes, _operation_stats, primary_key) = indexer
|
let (document_changes, _operation_stats, primary_key) = indexer
|
||||||
@ -1146,9 +1161,10 @@ fn reindexing_movies_default(c: &mut Criterion) {
|
|||||||
let db_fields_ids_map = index.fields_ids_map(&rtxn).unwrap();
|
let db_fields_ids_map = index.fields_ids_map(&rtxn).unwrap();
|
||||||
let mut new_fields_ids_map = db_fields_ids_map.clone();
|
let mut new_fields_ids_map = db_fields_ids_map.clone();
|
||||||
|
|
||||||
let mut indexer = indexer::DocumentOperation::new();
|
let mut indexer =
|
||||||
|
indexer::DocumentOperation::new(IndexDocumentsMethod::ReplaceDocuments);
|
||||||
let documents = utils::documents_from(datasets_paths::MOVIES, "json");
|
let documents = utils::documents_from(datasets_paths::MOVIES, "json");
|
||||||
indexer.replace_documents(&documents).unwrap();
|
indexer.add_documents(&documents).unwrap();
|
||||||
|
|
||||||
let indexer_alloc = Bump::new();
|
let indexer_alloc = Bump::new();
|
||||||
let (document_changes, _operation_stats, primary_key) = indexer
|
let (document_changes, _operation_stats, primary_key) = indexer
|
||||||
@ -1190,9 +1206,10 @@ fn reindexing_movies_default(c: &mut Criterion) {
|
|||||||
let db_fields_ids_map = index.fields_ids_map(&rtxn).unwrap();
|
let db_fields_ids_map = index.fields_ids_map(&rtxn).unwrap();
|
||||||
let mut new_fields_ids_map = db_fields_ids_map.clone();
|
let mut new_fields_ids_map = db_fields_ids_map.clone();
|
||||||
|
|
||||||
let mut indexer = indexer::DocumentOperation::new();
|
let mut indexer =
|
||||||
|
indexer::DocumentOperation::new(IndexDocumentsMethod::ReplaceDocuments);
|
||||||
let documents = utils::documents_from(datasets_paths::MOVIES, "json");
|
let documents = utils::documents_from(datasets_paths::MOVIES, "json");
|
||||||
indexer.replace_documents(&documents).unwrap();
|
indexer.add_documents(&documents).unwrap();
|
||||||
|
|
||||||
let indexer_alloc = Bump::new();
|
let indexer_alloc = Bump::new();
|
||||||
let (document_changes, _operation_stats, primary_key) = indexer
|
let (document_changes, _operation_stats, primary_key) = indexer
|
||||||
@ -1257,9 +1274,10 @@ fn deleting_movies_in_batches_default(c: &mut Criterion) {
|
|||||||
let db_fields_ids_map = index.fields_ids_map(&rtxn).unwrap();
|
let db_fields_ids_map = index.fields_ids_map(&rtxn).unwrap();
|
||||||
let mut new_fields_ids_map = db_fields_ids_map.clone();
|
let mut new_fields_ids_map = db_fields_ids_map.clone();
|
||||||
|
|
||||||
let mut indexer = indexer::DocumentOperation::new();
|
let mut indexer =
|
||||||
|
indexer::DocumentOperation::new(IndexDocumentsMethod::ReplaceDocuments);
|
||||||
let documents = utils::documents_from(datasets_paths::MOVIES, "json");
|
let documents = utils::documents_from(datasets_paths::MOVIES, "json");
|
||||||
indexer.replace_documents(&documents).unwrap();
|
indexer.add_documents(&documents).unwrap();
|
||||||
|
|
||||||
let indexer_alloc = Bump::new();
|
let indexer_alloc = Bump::new();
|
||||||
let (document_changes, _operation_stats, primary_key) = indexer
|
let (document_changes, _operation_stats, primary_key) = indexer
|
||||||
@ -1369,9 +1387,10 @@ fn indexing_movies_in_three_batches(c: &mut Criterion) {
|
|||||||
let db_fields_ids_map = index.fields_ids_map(&rtxn).unwrap();
|
let db_fields_ids_map = index.fields_ids_map(&rtxn).unwrap();
|
||||||
let mut new_fields_ids_map = db_fields_ids_map.clone();
|
let mut new_fields_ids_map = db_fields_ids_map.clone();
|
||||||
|
|
||||||
let mut indexer = indexer::DocumentOperation::new();
|
let mut indexer =
|
||||||
|
indexer::DocumentOperation::new(IndexDocumentsMethod::ReplaceDocuments);
|
||||||
let documents = utils::documents_from(datasets_paths::MOVIES_1_2, "json");
|
let documents = utils::documents_from(datasets_paths::MOVIES_1_2, "json");
|
||||||
indexer.replace_documents(&documents).unwrap();
|
indexer.add_documents(&documents).unwrap();
|
||||||
|
|
||||||
let indexer_alloc = Bump::new();
|
let indexer_alloc = Bump::new();
|
||||||
let (document_changes, _operation_stats, primary_key) = indexer
|
let (document_changes, _operation_stats, primary_key) = indexer
|
||||||
@ -1413,9 +1432,10 @@ fn indexing_movies_in_three_batches(c: &mut Criterion) {
|
|||||||
let db_fields_ids_map = index.fields_ids_map(&rtxn).unwrap();
|
let db_fields_ids_map = index.fields_ids_map(&rtxn).unwrap();
|
||||||
let mut new_fields_ids_map = db_fields_ids_map.clone();
|
let mut new_fields_ids_map = db_fields_ids_map.clone();
|
||||||
|
|
||||||
let mut indexer = indexer::DocumentOperation::new();
|
let mut indexer =
|
||||||
|
indexer::DocumentOperation::new(IndexDocumentsMethod::ReplaceDocuments);
|
||||||
let documents = utils::documents_from(datasets_paths::MOVIES_3_4, "json");
|
let documents = utils::documents_from(datasets_paths::MOVIES_3_4, "json");
|
||||||
indexer.replace_documents(&documents).unwrap();
|
indexer.add_documents(&documents).unwrap();
|
||||||
|
|
||||||
let indexer_alloc = Bump::new();
|
let indexer_alloc = Bump::new();
|
||||||
let (document_changes, _operation_stats, primary_key) = indexer
|
let (document_changes, _operation_stats, primary_key) = indexer
|
||||||
@ -1453,9 +1473,10 @@ fn indexing_movies_in_three_batches(c: &mut Criterion) {
|
|||||||
let db_fields_ids_map = index.fields_ids_map(&rtxn).unwrap();
|
let db_fields_ids_map = index.fields_ids_map(&rtxn).unwrap();
|
||||||
let mut new_fields_ids_map = db_fields_ids_map.clone();
|
let mut new_fields_ids_map = db_fields_ids_map.clone();
|
||||||
|
|
||||||
let mut indexer = indexer::DocumentOperation::new();
|
let mut indexer =
|
||||||
|
indexer::DocumentOperation::new(IndexDocumentsMethod::ReplaceDocuments);
|
||||||
let documents = utils::documents_from(datasets_paths::MOVIES_4_4, "json");
|
let documents = utils::documents_from(datasets_paths::MOVIES_4_4, "json");
|
||||||
indexer.replace_documents(&documents).unwrap();
|
indexer.add_documents(&documents).unwrap();
|
||||||
|
|
||||||
let indexer_alloc = Bump::new();
|
let indexer_alloc = Bump::new();
|
||||||
let (document_changes, _operation_stats, primary_key) = indexer
|
let (document_changes, _operation_stats, primary_key) = indexer
|
||||||
@ -1542,9 +1563,10 @@ fn indexing_nested_movies_default(c: &mut Criterion) {
|
|||||||
let db_fields_ids_map = index.fields_ids_map(&rtxn).unwrap();
|
let db_fields_ids_map = index.fields_ids_map(&rtxn).unwrap();
|
||||||
let mut new_fields_ids_map = db_fields_ids_map.clone();
|
let mut new_fields_ids_map = db_fields_ids_map.clone();
|
||||||
|
|
||||||
let mut indexer = indexer::DocumentOperation::new();
|
let mut indexer =
|
||||||
|
indexer::DocumentOperation::new(IndexDocumentsMethod::ReplaceDocuments);
|
||||||
let documents = utils::documents_from(datasets_paths::NESTED_MOVIES, "json");
|
let documents = utils::documents_from(datasets_paths::NESTED_MOVIES, "json");
|
||||||
indexer.replace_documents(&documents).unwrap();
|
indexer.add_documents(&documents).unwrap();
|
||||||
|
|
||||||
let indexer_alloc = Bump::new();
|
let indexer_alloc = Bump::new();
|
||||||
let (document_changes, _operation_stats, primary_key) = indexer
|
let (document_changes, _operation_stats, primary_key) = indexer
|
||||||
@ -1632,9 +1654,10 @@ fn deleting_nested_movies_in_batches_default(c: &mut Criterion) {
|
|||||||
let db_fields_ids_map = index.fields_ids_map(&rtxn).unwrap();
|
let db_fields_ids_map = index.fields_ids_map(&rtxn).unwrap();
|
||||||
let mut new_fields_ids_map = db_fields_ids_map.clone();
|
let mut new_fields_ids_map = db_fields_ids_map.clone();
|
||||||
|
|
||||||
let mut indexer = indexer::DocumentOperation::new();
|
let mut indexer =
|
||||||
|
indexer::DocumentOperation::new(IndexDocumentsMethod::ReplaceDocuments);
|
||||||
let documents = utils::documents_from(datasets_paths::NESTED_MOVIES, "json");
|
let documents = utils::documents_from(datasets_paths::NESTED_MOVIES, "json");
|
||||||
indexer.replace_documents(&documents).unwrap();
|
indexer.add_documents(&documents).unwrap();
|
||||||
|
|
||||||
let indexer_alloc = Bump::new();
|
let indexer_alloc = Bump::new();
|
||||||
let (document_changes, _operation_stats, primary_key) = indexer
|
let (document_changes, _operation_stats, primary_key) = indexer
|
||||||
@ -1714,9 +1737,10 @@ fn indexing_nested_movies_without_faceted_fields(c: &mut Criterion) {
|
|||||||
let db_fields_ids_map = index.fields_ids_map(&rtxn).unwrap();
|
let db_fields_ids_map = index.fields_ids_map(&rtxn).unwrap();
|
||||||
let mut new_fields_ids_map = db_fields_ids_map.clone();
|
let mut new_fields_ids_map = db_fields_ids_map.clone();
|
||||||
|
|
||||||
let mut indexer = indexer::DocumentOperation::new();
|
let mut indexer =
|
||||||
|
indexer::DocumentOperation::new(IndexDocumentsMethod::ReplaceDocuments);
|
||||||
let documents = utils::documents_from(datasets_paths::NESTED_MOVIES, "json");
|
let documents = utils::documents_from(datasets_paths::NESTED_MOVIES, "json");
|
||||||
indexer.replace_documents(&documents).unwrap();
|
indexer.add_documents(&documents).unwrap();
|
||||||
|
|
||||||
let indexer_alloc = Bump::new();
|
let indexer_alloc = Bump::new();
|
||||||
let (document_changes, _operation_stats, primary_key) = indexer
|
let (document_changes, _operation_stats, primary_key) = indexer
|
||||||
@ -1780,9 +1804,10 @@ fn indexing_geo(c: &mut Criterion) {
|
|||||||
let db_fields_ids_map = index.fields_ids_map(&rtxn).unwrap();
|
let db_fields_ids_map = index.fields_ids_map(&rtxn).unwrap();
|
||||||
let mut new_fields_ids_map = db_fields_ids_map.clone();
|
let mut new_fields_ids_map = db_fields_ids_map.clone();
|
||||||
|
|
||||||
let mut indexer = indexer::DocumentOperation::new();
|
let mut indexer =
|
||||||
|
indexer::DocumentOperation::new(IndexDocumentsMethod::ReplaceDocuments);
|
||||||
let documents = utils::documents_from(datasets_paths::SMOL_ALL_COUNTRIES, "jsonl");
|
let documents = utils::documents_from(datasets_paths::SMOL_ALL_COUNTRIES, "jsonl");
|
||||||
indexer.replace_documents(&documents).unwrap();
|
indexer.add_documents(&documents).unwrap();
|
||||||
|
|
||||||
let indexer_alloc = Bump::new();
|
let indexer_alloc = Bump::new();
|
||||||
let (document_changes, _operation_stats, primary_key) = indexer
|
let (document_changes, _operation_stats, primary_key) = indexer
|
||||||
@ -1845,9 +1870,10 @@ fn reindexing_geo(c: &mut Criterion) {
|
|||||||
let db_fields_ids_map = index.fields_ids_map(&rtxn).unwrap();
|
let db_fields_ids_map = index.fields_ids_map(&rtxn).unwrap();
|
||||||
let mut new_fields_ids_map = db_fields_ids_map.clone();
|
let mut new_fields_ids_map = db_fields_ids_map.clone();
|
||||||
|
|
||||||
let mut indexer = indexer::DocumentOperation::new();
|
let mut indexer =
|
||||||
|
indexer::DocumentOperation::new(IndexDocumentsMethod::ReplaceDocuments);
|
||||||
let documents = utils::documents_from(datasets_paths::SMOL_ALL_COUNTRIES, "jsonl");
|
let documents = utils::documents_from(datasets_paths::SMOL_ALL_COUNTRIES, "jsonl");
|
||||||
indexer.replace_documents(&documents).unwrap();
|
indexer.add_documents(&documents).unwrap();
|
||||||
|
|
||||||
let indexer_alloc = Bump::new();
|
let indexer_alloc = Bump::new();
|
||||||
let (document_changes, _operation_stats, primary_key) = indexer
|
let (document_changes, _operation_stats, primary_key) = indexer
|
||||||
@ -1889,9 +1915,10 @@ fn reindexing_geo(c: &mut Criterion) {
|
|||||||
let db_fields_ids_map = index.fields_ids_map(&rtxn).unwrap();
|
let db_fields_ids_map = index.fields_ids_map(&rtxn).unwrap();
|
||||||
let mut new_fields_ids_map = db_fields_ids_map.clone();
|
let mut new_fields_ids_map = db_fields_ids_map.clone();
|
||||||
|
|
||||||
let mut indexer = indexer::DocumentOperation::new();
|
let mut indexer =
|
||||||
|
indexer::DocumentOperation::new(IndexDocumentsMethod::ReplaceDocuments);
|
||||||
let documents = utils::documents_from(datasets_paths::SMOL_ALL_COUNTRIES, "jsonl");
|
let documents = utils::documents_from(datasets_paths::SMOL_ALL_COUNTRIES, "jsonl");
|
||||||
indexer.replace_documents(&documents).unwrap();
|
indexer.add_documents(&documents).unwrap();
|
||||||
|
|
||||||
let indexer_alloc = Bump::new();
|
let indexer_alloc = Bump::new();
|
||||||
let (document_changes, _operation_stats, primary_key) = indexer
|
let (document_changes, _operation_stats, primary_key) = indexer
|
||||||
@ -1956,9 +1983,10 @@ fn deleting_geo_in_batches_default(c: &mut Criterion) {
|
|||||||
let db_fields_ids_map = index.fields_ids_map(&rtxn).unwrap();
|
let db_fields_ids_map = index.fields_ids_map(&rtxn).unwrap();
|
||||||
let mut new_fields_ids_map = db_fields_ids_map.clone();
|
let mut new_fields_ids_map = db_fields_ids_map.clone();
|
||||||
|
|
||||||
let mut indexer = indexer::DocumentOperation::new();
|
let mut indexer =
|
||||||
|
indexer::DocumentOperation::new(IndexDocumentsMethod::ReplaceDocuments);
|
||||||
let documents = utils::documents_from(datasets_paths::SMOL_ALL_COUNTRIES, "jsonl");
|
let documents = utils::documents_from(datasets_paths::SMOL_ALL_COUNTRIES, "jsonl");
|
||||||
indexer.replace_documents(&documents).unwrap();
|
indexer.add_documents(&documents).unwrap();
|
||||||
|
|
||||||
let indexer_alloc = Bump::new();
|
let indexer_alloc = Bump::new();
|
||||||
let (document_changes, _operation_stats, primary_key) = indexer
|
let (document_changes, _operation_stats, primary_key) = indexer
|
||||||
|
@@ -3,7 +3,6 @@ mod utils;
 
 use criterion::{criterion_group, criterion_main};
 use milli::update::Settings;
-use milli::FilterableAttributesRule;
 use utils::Conf;
 
 #[cfg(not(windows))]
@@ -22,10 +21,8 @@ fn base_conf(builder: &mut Settings) {
         ["name", "alternatenames", "elevation"].iter().map(|s| s.to_string()).collect();
     builder.set_searchable_fields(searchable_fields);
 
-    let filterable_fields = ["_geo", "population", "elevation"]
-        .iter()
-        .map(|s| FilterableAttributesRule::Field(s.to_string()))
-        .collect();
+    let filterable_fields =
+        ["_geo", "population", "elevation"].iter().map(|s| s.to_string()).collect();
     builder.set_filterable_fields(filterable_fields);
 
     let sortable_fields =
@@ -3,7 +3,6 @@ mod utils;
 
 use criterion::{criterion_group, criterion_main};
 use milli::update::Settings;
-use milli::FilterableAttributesRule;
 use utils::Conf;
 
 #[cfg(not(windows))]
@@ -23,7 +22,7 @@ fn base_conf(builder: &mut Settings) {
 
     let faceted_fields = ["released-timestamp", "duration-float", "genre", "country", "artist"]
         .iter()
-        .map(|s| FilterableAttributesRule::Field(s.to_string()))
+        .map(|s| s.to_string())
         .collect();
     builder.set_filterable_fields(faceted_fields);
 }
@ -12,7 +12,7 @@ use memmap2::Mmap;
|
|||||||
use milli::heed::EnvOpenOptions;
|
use milli::heed::EnvOpenOptions;
|
||||||
use milli::progress::Progress;
|
use milli::progress::Progress;
|
||||||
use milli::update::new::indexer;
|
use milli::update::new::indexer;
|
||||||
use milli::update::{IndexerConfig, Settings};
|
use milli::update::{IndexDocumentsMethod, IndexerConfig, Settings};
|
||||||
use milli::vector::EmbeddingConfigs;
|
use milli::vector::EmbeddingConfigs;
|
||||||
use milli::{Criterion, Filter, Index, Object, TermsMatchingStrategy};
|
use milli::{Criterion, Filter, Index, Object, TermsMatchingStrategy};
|
||||||
use serde_json::Value;
|
use serde_json::Value;
|
||||||
@ -65,8 +65,7 @@ pub fn base_setup(conf: &Conf) -> Index {
|
|||||||
}
|
}
|
||||||
create_dir_all(conf.database_name).unwrap();
|
create_dir_all(conf.database_name).unwrap();
|
||||||
|
|
||||||
let options = EnvOpenOptions::new();
|
let mut options = EnvOpenOptions::new();
|
||||||
let mut options = options.read_txn_without_tls();
|
|
||||||
options.map_size(100 * 1024 * 1024 * 1024); // 100 GB
|
options.map_size(100 * 1024 * 1024 * 1024); // 100 GB
|
||||||
options.max_readers(100);
|
options.max_readers(100);
|
||||||
let index = Index::new(options, conf.database_name, true).unwrap();
|
let index = Index::new(options, conf.database_name, true).unwrap();
|
||||||
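Several hunks in this compare repeat one heed pattern: the newer code rebinds the options through `read_txn_without_tls()`, switching the environment to heed's `WithoutTls` read-transaction mode, while the older code mutates a plain `EnvOpenOptions` in place. A minimal sketch of the two shapes, assuming the same `milli::heed` re-export the benches import; the map size and path are placeholders:

```rust
use milli::heed::EnvOpenOptions;

fn open_shapes(_path: &std::path::Path) {
    // Newer shape (left side of the hunks): rebinding through
    // `read_txn_without_tls()` moves the builder into `WithoutTls` mode,
    // so read transactions are no longer tied to thread-local storage.
    let options = EnvOpenOptions::new();
    let mut options = options.read_txn_without_tls();
    options.map_size(100 * 1024 * 1024 * 1024); // placeholder: 100 GiB

    // Older shape (right side): the TLS-backed default, mutated in place.
    let mut old_options = EnvOpenOptions::new();
    old_options.map_size(100 * 1024 * 1024 * 1024);

    let _ = (options, old_options);
}
```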
@@ -100,8 +99,8 @@ pub fn base_setup(conf: &Conf) -> Index {
     let mut new_fields_ids_map = db_fields_ids_map.clone();
 
     let documents = documents_from(conf.dataset, conf.dataset_format);
-    let mut indexer = indexer::DocumentOperation::new();
-    indexer.replace_documents(&documents).unwrap();
+    let mut indexer = indexer::DocumentOperation::new(IndexDocumentsMethod::ReplaceDocuments);
+    indexer.add_documents(&documents).unwrap();
 
     let indexer_alloc = Bump::new();
     let (document_changes, _operation_stats, primary_key) = indexer
@@ -233,11 +233,11 @@ pub(crate) mod test {
 use meilisearch_types::features::{Network, Remote, RuntimeTogglableFeatures};
 use meilisearch_types::index_uid_pattern::IndexUidPattern;
 use meilisearch_types::keys::{Action, Key};
+use meilisearch_types::milli;
 use meilisearch_types::milli::update::Setting;
-use meilisearch_types::milli::{self, FilterableAttributesRule};
 use meilisearch_types::settings::{Checked, FacetingSettings, Settings};
 use meilisearch_types::task_view::DetailsView;
-use meilisearch_types::tasks::{BatchStopReason, Details, Kind, Status};
+use meilisearch_types::tasks::{Details, Kind, Status};
 use serde_json::{json, Map, Value};
 use time::macros::datetime;
 use uuid::Uuid;
@@ -279,10 +279,7 @@ pub(crate) mod test {
         let settings = Settings {
             displayed_attributes: Setting::Set(vec![S("race"), S("name")]).into(),
             searchable_attributes: Setting::Set(vec![S("name"), S("race")]).into(),
-            filterable_attributes: Setting::Set(vec![
-                FilterableAttributesRule::Field(S("race")),
-                FilterableAttributesRule::Field(S("age")),
-            ]),
+            filterable_attributes: Setting::Set(btreeset! { S("race"), S("age") }),
             sortable_attributes: Setting::Set(btreeset! { S("age") }),
             ranking_rules: Setting::NotSet,
             stop_words: Setting::NotSet,
@@ -305,7 +302,6 @@ pub(crate) mod test {
             localized_attributes: Setting::NotSet,
             facet_search: Setting::NotSet,
             prefix_search: Setting::NotSet,
-            chat: Setting::NotSet,
             _kind: std::marker::PhantomData,
         };
         settings.check()
@@ -325,9 +321,6 @@ pub(crate) mod test {
                 status: maplit::btreemap! { Status::Succeeded => 1 },
                 types: maplit::btreemap! { Kind::DocumentAdditionOrUpdate => 1 },
                 index_uids: maplit::btreemap! { "doggo".to_string() => 1 },
-                progress_trace: Default::default(),
-                write_channel_congestion: None,
-                internal_database_sizes: Default::default(),
             },
             enqueued_at: Some(BatchEnqueuedAt {
                 earliest: datetime!(2022-11-11 0:00 UTC),
@@ -335,7 +328,6 @@ pub(crate) mod test {
             }),
             started_at: datetime!(2022-11-20 0:00 UTC),
             finished_at: Some(datetime!(2022-11-21 0:00 UTC)),
-            stop_reason: BatchStopReason::Unspecified.to_string(),
         }]
     }
 
@@ -1,4 +1,3 @@
-use std::num::NonZeroUsize;
 use std::str::FromStr;
 
 use super::v4_to_v5::{CompatIndexV4ToV5, CompatV4ToV5};
@@ -323,16 +322,7 @@ impl<T> From<v5::Settings<T>> for v6::Settings<v6::Unchecked> {
         v6::Settings {
             displayed_attributes: v6::Setting::from(settings.displayed_attributes).into(),
             searchable_attributes: v6::Setting::from(settings.searchable_attributes).into(),
-            filterable_attributes: match settings.filterable_attributes {
-                v5::settings::Setting::Set(filterable_attributes) => v6::Setting::Set(
-                    filterable_attributes
-                        .into_iter()
-                        .map(v6::FilterableAttributesRule::Field)
-                        .collect(),
-                ),
-                v5::settings::Setting::Reset => v6::Setting::Reset,
-                v5::settings::Setting::NotSet => v6::Setting::NotSet,
-            },
+            filterable_attributes: settings.filterable_attributes.into(),
             sortable_attributes: settings.sortable_attributes.into(),
             ranking_rules: {
                 match settings.ranking_rules {
@@ -374,7 +364,6 @@ impl<T> From<v5::Settings<T>> for v6::Settings<v6::Unchecked> {
                     },
                     disable_on_words: typo.disable_on_words.into(),
                     disable_on_attributes: typo.disable_on_attributes.into(),
-                    disable_on_numbers: v6::Setting::NotSet,
                 }),
                 v5::Setting::Reset => v6::Setting::Reset,
                 v5::Setting::NotSet => v6::Setting::NotSet,
@@ -389,13 +378,7 @@ impl<T> From<v5::Settings<T>> for v6::Settings<v6::Unchecked> {
             },
             pagination: match settings.pagination {
                 v5::Setting::Set(pagination) => v6::Setting::Set(v6::PaginationSettings {
-                    max_total_hits: match pagination.max_total_hits {
-                        v5::Setting::Set(max_total_hits) => v6::Setting::Set(
-                            max_total_hits.try_into().unwrap_or(NonZeroUsize::new(1).unwrap()),
-                        ),
-                        v5::Setting::Reset => v6::Setting::Reset,
-                        v5::Setting::NotSet => v6::Setting::NotSet,
-                    },
+                    max_total_hits: pagination.max_total_hits.into(),
                 }),
                 v5::Setting::Reset => v6::Setting::Reset,
                 v5::Setting::NotSet => v6::Setting::NotSet,
@@ -405,7 +388,6 @@ impl<T> From<v5::Settings<T>> for v6::Settings<v6::Unchecked> {
             search_cutoff_ms: v6::Setting::NotSet,
             facet_search: v6::Setting::NotSet,
             prefix_search: v6::Setting::NotSet,
-            chat: v6::Setting::NotSet,
             _kind: std::marker::PhantomData,
         }
     }
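The compat hunks above replace hand-written `match`es over the tri-state `Setting` with blanket `.into()` conversions. A toy model (local types, not meilisearch's) of why the two spellings agree: only the `Set` payload needs mapping, while the `Reset`/`NotSet` arms carry across unchanged.

```rust
// Toy tri-state setting, mirroring the Set/Reset/NotSet shape in the hunks
// above (these are local types, not meilisearch's).
#[derive(Debug, PartialEq)]
enum Setting<T> {
    Set(T),
    Reset,
    NotSet,
}

impl<T> Setting<T> {
    // The generic conversion that `.into()` performs in the real code:
    // only the `Set` payload is transformed, the other arms pass through.
    fn map<U>(self, f: impl FnOnce(T) -> U) -> Setting<U> {
        match self {
            Setting::Set(v) => Setting::Set(f(v)),
            Setting::Reset => Setting::Reset,
            Setting::NotSet => Setting::NotSet,
        }
    }
}

fn main() {
    let v5: Setting<Vec<&str>> = Setting::Set(vec!["race", "age"]);
    let v6 = v5.map(|fields| fields.join(","));
    assert_eq!(v6, Setting::Set("race,age".to_string()));

    let reset: Setting<Vec<&str>> = Setting::Reset;
    assert_eq!(reset.map(|fields| fields.join(",")), Setting::Reset);
}
```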
@@ -1,5 +1,5 @@
 ---
-source: crates/dump/src/reader/mod.rs
+source: dump/src/reader/mod.rs
 expression: vector_index.settings().unwrap()
 ---
 {
@@ -49,7 +49,6 @@ expression: vector_index.settings().unwrap()
       "source": "huggingFace",
       "model": "BAAI/bge-base-en-v1.5",
       "revision": "617ca489d9e86b49b8167676d8220688b99db36e",
-      "pooling": "forceMean",
       "documentTemplate": "{% for field in fields %} {{ field.name }}: {{ field.value }}\n{% endfor %}"
     }
   },
@@ -108,7 +108,7 @@ where
     /// not supported on untagged enums.
     struct StarOrVisitor<T>(PhantomData<T>);
 
-    impl<T, FE> Visitor<'_> for StarOrVisitor<T>
+    impl<'de, T, FE> Visitor<'de> for StarOrVisitor<T>
     where
         T: FromStr<Err = FE>,
         FE: Display,
@@ -99,7 +99,7 @@ impl Task {
     /// Return true when a task is finished.
     /// A task is finished when its last state is either `Succeeded` or `Failed`.
     pub fn is_finished(&self) -> bool {
-        self.events.last().is_some_and(|event| {
+        self.events.last().map_or(false, |event| {
             matches!(event, TaskEvent::Succeded { .. } | TaskEvent::Failed { .. })
         })
     }
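This hunk (and its twin for the v5 reader just below) swaps `Option::map_or(false, f)` for `Option::is_some_and(f)`, which was stabilized in Rust 1.70; for a boolean predicate the two are equivalent. A std-only sketch:

```rust
fn main() {
    let events = vec!["Created", "Processing", "Succeeded"];

    // Older spelling: default to `false` when there is no last event.
    let finished_old = events.last().map_or(false, |e| matches!(*e, "Succeeded" | "Failed"));

    // Newer spelling: reads as "is Some and the predicate holds".
    let finished_new = events.last().is_some_and(|e| matches!(*e, "Succeeded" | "Failed"));

    assert_eq!(finished_old, finished_new);
    assert!(finished_new);
}
```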
@@ -108,7 +108,7 @@ where
     /// not supported on untagged enums.
     struct StarOrVisitor<T>(PhantomData<T>);
 
-    impl<T, FE> Visitor<'_> for StarOrVisitor<T>
+    impl<'de, T, FE> Visitor<'de> for StarOrVisitor<T>
     where
         T: FromStr<Err = FE>,
         FE: Display,
@@ -114,7 +114,7 @@ impl Task {
     /// Return true when a task is finished.
     /// A task is finished when its last state is either `Succeeded` or `Failed`.
     pub fn is_finished(&self) -> bool {
-        self.events.last().is_some_and(|event| {
+        self.events.last().map_or(false, |event| {
             matches!(event, TaskEvent::Succeeded { .. } | TaskEvent::Failed { .. })
         })
     }
@@ -3,7 +3,6 @@ use std::io::{BufRead, BufReader, ErrorKind};
 use std::path::Path;
 
 pub use meilisearch_types::milli;
-use meilisearch_types::milli::vector::hf::OverridePooling;
 use tempfile::TempDir;
 use time::OffsetDateTime;
 use tracing::debug;
@@ -46,8 +45,6 @@ pub type ResponseError = meilisearch_types::error::ResponseError;
 pub type Code = meilisearch_types::error::Code;
 pub type RankingRuleView = meilisearch_types::settings::RankingRuleView;
 
-pub type FilterableAttributesRule = meilisearch_types::milli::FilterableAttributesRule;
-
 pub struct V6Reader {
     dump: TempDir,
     instance_uid: Option<Uuid>,
@@ -255,29 +252,7 @@ impl V6IndexReader {
     }
 
     pub fn settings(&mut self) -> Result<Settings<Checked>> {
-        let mut settings: Settings<Unchecked> = serde_json::from_reader(&mut self.settings)?;
-        patch_embedders(&mut settings);
+        let settings: Settings<Unchecked> = serde_json::from_reader(&mut self.settings)?;
         Ok(settings.check())
     }
 }
-
-fn patch_embedders(settings: &mut Settings<Unchecked>) {
-    if let Setting::Set(embedders) = &mut settings.embedders {
-        for settings in embedders.values_mut() {
-            let Setting::Set(settings) = &mut settings.inner else {
-                continue;
-            };
-            if settings.source != Setting::Set(milli::vector::settings::EmbedderSource::HuggingFace)
-            {
-                continue;
-            }
-            settings.pooling = match settings.pooling {
-                Setting::Set(pooling) => Setting::Set(pooling),
-                // if the pooling for a hugging face embedder is not set, force it to `forceMean`
-                // for backward compatibility with v1.13
-                // dumps created in v1.14 and up will have the setting set for hugging face embedders
-                Setting::Reset | Setting::NotSet => Setting::Set(OverridePooling::ForceMean),
-            };
-        }
-    }
-}
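The removed `patch_embedders` above leans on `let ... else` (stable since Rust 1.65) to skip non-matching entries early instead of nesting `if let`. A std-only sketch of that control flow with a local toy type:

```rust
#[derive(Debug)]
enum Setting<T> {
    Set(T),
    NotSet,
}

// Mirrors the early-continue shape of `patch_embedders`: bind the payload
// when the pattern matches, otherwise skip straight to the next entry.
fn first_set(values: &[Setting<i32>]) -> Option<i32> {
    for value in values {
        let Setting::Set(inner) = value else {
            continue;
        };
        return Some(*inner);
    }
    None
}

fn main() {
    let values = [Setting::NotSet, Setting::Set(7)];
    assert_eq!(first_set(&values), Some(7));
}
```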
@@ -30,25 +30,6 @@ pub enum Condition<'a> {
     StartsWith { keyword: Token<'a>, word: Token<'a> },
 }
 
-impl Condition<'_> {
-    pub fn operator(&self) -> &str {
-        match self {
-            Condition::GreaterThan(_) => ">",
-            Condition::GreaterThanOrEqual(_) => ">=",
-            Condition::Equal(_) => "=",
-            Condition::NotEqual(_) => "!=",
-            Condition::Null => "IS NULL",
-            Condition::Empty => "IS EMPTY",
-            Condition::Exists => "EXISTS",
-            Condition::LowerThan(_) => "<",
-            Condition::LowerThanOrEqual(_) => "<=",
-            Condition::Between { .. } => "TO",
-            Condition::Contains { .. } => "CONTAINS",
-            Condition::StartsWith { .. } => "STARTS WITH",
-        }
-    }
-}
-
 /// condition = value ("==" | ">" ...) value
 pub fn parse_condition(input: Span) -> IResult<FilterCondition> {
     let operator = alt((tag("<="), tag(">="), tag("!="), tag("<"), tag(">"), tag("=")));
@@ -35,7 +35,7 @@ impl<E> NomErrorExt<E> for nom::Err<E> {
 pub fn cut_with_err<'a, O>(
     mut parser: impl FnMut(Span<'a>) -> IResult<'a, O>,
     mut with: impl FnMut(Error<'a>) -> Error<'a>,
-) -> impl FnMut(Span<'a>) -> IResult<'a, O> {
+) -> impl FnMut(Span<'a>) -> IResult<O> {
     move |input| match parser.parse(input) {
         Err(nom::Err::Error(e)) => Err(nom::Err::Failure(with(e))),
         rest => rest,
@@ -121,7 +121,7 @@ impl<'a> ParseError<Span<'a>> for Error<'a> {
     }
 }
 
-impl Display for Error<'_> {
+impl<'a> Display for Error<'a> {
     fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
         let input = self.context.fragment();
         // When printing our error message we want to escape all `\n` to be sure we keep our format with the
@@ -80,7 +80,7 @@ pub struct Token<'a> {
     value: Option<String>,
 }
 
-impl PartialEq for Token<'_> {
+impl<'a> PartialEq for Token<'a> {
     fn eq(&self, other: &Self) -> bool {
         self.span.fragment() == other.span.fragment()
     }
@@ -226,7 +226,7 @@ impl<'a> FilterCondition<'a> {
         }
     }
 
-    pub fn parse(input: &'a str) -> Result<Option<Self>, Error<'a>> {
+    pub fn parse(input: &'a str) -> Result<Option<Self>, Error> {
         if input.trim().is_empty() {
             return Ok(None);
         }
@@ -527,7 +527,7 @@ pub fn parse_filter(input: Span) -> IResult<FilterCondition> {
     terminated(|input| parse_expression(input, 0), eof)(input)
 }
 
-impl std::fmt::Display for FilterCondition<'_> {
+impl<'a> std::fmt::Display for FilterCondition<'a> {
     fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
         match self {
             FilterCondition::Not(filter) => {
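A recurring change across these filter-parser hunks: `impl Trait for Type<'_>` (the anonymous-lifetime spelling that newer rustc/clippy lints favor) versus the fully named `impl<'a> Trait for Type<'a>`. When the body never refers to the lifetime, both forms compile to the same thing; a std-only sketch:

```rust
use std::fmt;

struct Token<'a> {
    text: &'a str,
}

// Fully spelled lifetime, as on the right side of the hunks:
//     impl<'a> fmt::Display for Token<'a> { ... }
//
// Elided form, as on the left side: the anonymous lifetime `'_`
// stands in for the parameter the body never uses.
impl fmt::Display for Token<'_> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "{{{}}}", self.text)
    }
}

fn main() {
    let token = Token { text: "doggo" };
    assert_eq!(token.to_string(), "{doggo}");
}
```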
@ -576,8 +576,7 @@ impl std::fmt::Display for FilterCondition<'_> {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
impl<'a> std::fmt::Display for Condition<'a> {
|
||||||
impl std::fmt::Display for Condition<'_> {
|
|
||||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||||
match self {
|
match self {
|
||||||
Condition::GreaterThan(token) => write!(f, "> {token}"),
|
Condition::GreaterThan(token) => write!(f, "> {token}"),
|
||||||
@ -595,8 +594,7 @@ impl std::fmt::Display for Condition<'_> {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
impl<'a> std::fmt::Display for Token<'a> {
|
||||||
impl std::fmt::Display for Token<'_> {
|
|
||||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||||
write!(f, "{{{}}}", self.value())
|
write!(f, "{{{}}}", self.value())
|
||||||
}
|
}
|
||||||
|
@ -52,7 +52,7 @@ fn quoted_by(quote: char, input: Span) -> IResult<Token> {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// word = (alphanumeric | _ | - | .)+ except for reserved keywords
|
// word = (alphanumeric | _ | - | .)+ except for reserved keywords
|
||||||
pub fn word_not_keyword<'a>(input: Span<'a>) -> IResult<'a, Token<'a>> {
|
pub fn word_not_keyword<'a>(input: Span<'a>) -> IResult<Token<'a>> {
|
||||||
let (input, word): (_, Token<'a>) =
|
let (input, word): (_, Token<'a>) =
|
||||||
take_while1(is_value_component)(input).map(|(s, t)| (s, t.into()))?;
|
take_while1(is_value_component)(input).map(|(s, t)| (s, t.into()))?;
|
||||||
if is_keyword(word.value()) {
|
if is_keyword(word.value()) {
|
||||||
@@ -12,7 +12,7 @@ use milli::documents::mmap_from_objects;
 use milli::heed::EnvOpenOptions;
 use milli::progress::Progress;
 use milli::update::new::indexer;
-use milli::update::IndexerConfig;
+use milli::update::{IndexDocumentsMethod, IndexerConfig};
 use milli::vector::EmbeddingConfigs;
 use milli::Index;
 use serde_json::Value;
@@ -57,8 +57,7 @@ fn main() {
             let opt = opt.clone();
 
             let handle = std::thread::spawn(move || {
-                let options = EnvOpenOptions::new();
-                let mut options = options.read_txn_without_tls();
+                let mut options = EnvOpenOptions::new();
                 options.map_size(1024 * 1024 * 1024 * 1024);
                 let tempdir = match opt.path {
                     Some(path) => TempDir::new_in(path).unwrap(),
@@ -90,7 +89,9 @@ fn main() {
 
                     let indexer_alloc = Bump::new();
                     let embedders = EmbeddingConfigs::default();
-                    let mut indexer = indexer::DocumentOperation::new();
+                    let mut indexer = indexer::DocumentOperation::new(
+                        IndexDocumentsMethod::ReplaceDocuments,
+                    );
 
                     let mut operations = Vec::new();
                     for op in batch.0 {
@@ -114,7 +115,7 @@ fn main() {
                     for op in &operations {
                         match op {
                             Either::Left(documents) => {
-                                indexer.replace_documents(documents).unwrap()
+                                indexer.add_documents(documents).unwrap()
                             }
                             Either::Right(ids) => indexer.delete_documents(ids),
                         }
@ -13,7 +13,6 @@ license.workspace = true
|
|||||||
[dependencies]
|
[dependencies]
|
||||||
anyhow = "1.0.95"
|
anyhow = "1.0.95"
|
||||||
bincode = "1.3.3"
|
bincode = "1.3.3"
|
||||||
byte-unit = "5.1.6"
|
|
||||||
bumpalo = "3.16.0"
|
bumpalo = "3.16.0"
|
||||||
bumparaw-collections = "0.1.4"
|
bumparaw-collections = "0.1.4"
|
||||||
convert_case = "0.6.0"
|
convert_case = "0.6.0"
|
||||||
@ -23,7 +22,6 @@ dump = { path = "../dump" }
|
|||||||
enum-iterator = "2.1.0"
|
enum-iterator = "2.1.0"
|
||||||
file-store = { path = "../file-store" }
|
file-store = { path = "../file-store" }
|
||||||
flate2 = "1.0.35"
|
flate2 = "1.0.35"
|
||||||
indexmap = "2.7.0"
|
|
||||||
meilisearch-auth = { path = "../meilisearch-auth" }
|
meilisearch-auth = { path = "../meilisearch-auth" }
|
||||||
meilisearch-types = { path = "../meilisearch-types" }
|
meilisearch-types = { path = "../meilisearch-types" }
|
||||||
memmap2 = "0.9.5"
|
memmap2 = "0.9.5"
|
||||||
@ -31,7 +29,7 @@ page_size = "0.6.0"
|
|||||||
rayon = "1.10.0"
|
rayon = "1.10.0"
|
||||||
roaring = { version = "0.10.10", features = ["serde"] }
|
roaring = { version = "0.10.10", features = ["serde"] }
|
||||||
serde = { version = "1.0.217", features = ["derive"] }
|
serde = { version = "1.0.217", features = ["derive"] }
|
||||||
serde_json = { version = "1.0.138", features = ["preserve_order"] }
|
serde_json = { version = "1.0.135", features = ["preserve_order"] }
|
||||||
synchronoise = "1.0.1"
|
synchronoise = "1.0.1"
|
||||||
tempfile = "3.15.0"
|
tempfile = "3.15.0"
|
||||||
thiserror = "2.0.9"
|
thiserror = "2.0.9"
|
||||||
@ -46,8 +44,9 @@ ureq = "2.12.1"
|
|||||||
uuid = { version = "1.11.0", features = ["serde", "v4"] }
|
uuid = { version = "1.11.0", features = ["serde", "v4"] }
|
||||||
|
|
||||||
[dev-dependencies]
|
[dev-dependencies]
|
||||||
|
arroy = { git = "https://github.com/meilisearch/arroy", rev = "55fb0e8006f4f00ad3197b06bda348133f6ffffa" }
|
||||||
big_s = "1.0.2"
|
big_s = "1.0.2"
|
||||||
crossbeam-channel = "0.5.15"
|
crossbeam-channel = "0.5.14"
|
||||||
# fixed version due to format breakages in v1.40
|
# fixed version due to format breakages in v1.40
|
||||||
insta = { version = "=1.39.0", features = ["json", "redactions"] }
|
insta = { version = "=1.39.0", features = ["json", "redactions"] }
|
||||||
maplit = "1.0.2"
|
maplit = "1.0.2"
|
||||||
|
@ -2,7 +2,6 @@ use std::fmt::Display;
|
|||||||
|
|
||||||
use meilisearch_types::batches::BatchId;
|
use meilisearch_types::batches::BatchId;
|
||||||
use meilisearch_types::error::{Code, ErrorCode};
|
use meilisearch_types::error::{Code, ErrorCode};
|
||||||
use meilisearch_types::milli::index::RollbackOutcome;
|
|
||||||
use meilisearch_types::tasks::{Kind, Status};
|
use meilisearch_types::tasks::{Kind, Status};
|
||||||
use meilisearch_types::{heed, milli};
|
use meilisearch_types::{heed, milli};
|
||||||
use thiserror::Error;
|
use thiserror::Error;
|
||||||
@ -151,24 +150,8 @@ pub enum Error {
|
|||||||
CorruptedTaskQueue,
|
CorruptedTaskQueue,
|
||||||
#[error(transparent)]
|
#[error(transparent)]
|
||||||
DatabaseUpgrade(Box<Self>),
|
DatabaseUpgrade(Box<Self>),
|
||||||
#[error("Failed to rollback for index `{index}`: {rollback_outcome} ")]
|
|
||||||
RollbackFailed { index: String, rollback_outcome: RollbackOutcome },
|
|
||||||
#[error(transparent)]
|
#[error(transparent)]
|
||||||
UnrecoverableError(Box<Self>),
|
UnrecoverableError(Box<Self>),
|
||||||
#[error("The index scheduler is in version v{}.{}.{}, but Meilisearch is in version v{}.{}.{}.\n - hint: start the correct version of Meilisearch, or consider updating your database. See also <https://www.meilisearch.com/docs/learn/update_and_migration/updating>",
|
|
||||||
index_scheduler_version.0, index_scheduler_version.1, index_scheduler_version.2,
|
|
||||||
package_version.0, package_version.1, package_version.2)]
|
|
||||||
IndexSchedulerVersionMismatch {
|
|
||||||
index_scheduler_version: (u32, u32, u32),
|
|
||||||
package_version: (u32, u32, u32),
|
|
||||||
},
|
|
||||||
#[error("Index `{index}` is in version v{}.{}.{}, but Meilisearch is in version v{}.{}.{}.\n - note: this is an internal error, please consider filing a bug report: <https://github.com/meilisearch/meilisearch/issues/new?template=bug_report.md>",
|
|
||||||
index_version.0, index_version.1, index_version.2, package_version.0, package_version.1, package_version.2)]
|
|
||||||
IndexVersionMismatch {
|
|
||||||
index: String,
|
|
||||||
index_version: (u32, u32, u32),
|
|
||||||
package_version: (u32, u32, u32),
|
|
||||||
},
|
|
||||||
#[error(transparent)]
|
#[error(transparent)]
|
||||||
HeedTransaction(heed::Error),
|
HeedTransaction(heed::Error),
|
||||||
|
|
||||||
@ -226,9 +209,6 @@ impl Error {
|
|||||||
| Error::CorruptedTaskQueue
|
| Error::CorruptedTaskQueue
|
||||||
| Error::DatabaseUpgrade(_)
|
| Error::DatabaseUpgrade(_)
|
||||||
| Error::UnrecoverableError(_)
|
| Error::UnrecoverableError(_)
|
||||||
| Error::IndexSchedulerVersionMismatch { .. }
|
|
||||||
| Error::IndexVersionMismatch { .. }
|
|
||||||
| Error::RollbackFailed { .. }
|
|
||||||
| Error::HeedTransaction(_) => false,
|
| Error::HeedTransaction(_) => false,
|
||||||
#[cfg(test)]
|
#[cfg(test)]
|
||||||
Error::PlannedFailure => false,
|
Error::PlannedFailure => false,
|
||||||
@ -294,10 +274,7 @@ impl ErrorCode for Error {
|
|||||||
Error::CorruptedTaskQueue => Code::Internal,
|
Error::CorruptedTaskQueue => Code::Internal,
|
||||||
Error::CorruptedDump => Code::Internal,
|
Error::CorruptedDump => Code::Internal,
|
||||||
Error::DatabaseUpgrade(_) => Code::Internal,
|
Error::DatabaseUpgrade(_) => Code::Internal,
|
||||||
Error::RollbackFailed { .. } => Code::Internal,
|
|
||||||
Error::UnrecoverableError(_) => Code::Internal,
|
Error::UnrecoverableError(_) => Code::Internal,
|
||||||
Error::IndexSchedulerVersionMismatch { .. } => Code::Internal,
|
|
||||||
Error::IndexVersionMismatch { .. } => Code::Internal,
|
|
||||||
Error::CreateBatch(_) => Code::Internal,
|
Error::CreateBatch(_) => Code::Internal,
|
||||||
|
|
||||||
// This one should never be seen by the end user
|
// This one should never be seen by the end user
|
||||||
@@ -2,7 +2,7 @@ use std::sync::{Arc, RwLock};
 
 use meilisearch_types::features::{InstanceTogglableFeatures, Network, RuntimeTogglableFeatures};
 use meilisearch_types::heed::types::{SerdeJson, Str};
-use meilisearch_types::heed::{Database, Env, RwTxn, WithoutTls};
+use meilisearch_types::heed::{Database, Env, RwTxn};
 
 use crate::error::FeatureNotEnabledError;
 use crate::Result;
@@ -118,32 +118,6 @@ impl RoFeatures {
             .into())
         }
     }
-
-    pub fn check_composite_embedders(&self, disabled_action: &'static str) -> Result<()> {
-        if self.runtime.composite_embedders {
-            Ok(())
-        } else {
-            Err(FeatureNotEnabledError {
-                disabled_action,
-                feature: "composite embedders",
-                issue_link: "https://github.com/orgs/meilisearch/discussions/816",
-            }
-            .into())
-        }
-    }
-
-    pub fn check_chat_completions(&self, disabled_action: &'static str) -> Result<()> {
-        if self.runtime.chat_completions {
-            Ok(())
-        } else {
-            Err(FeatureNotEnabledError {
-                disabled_action,
-                feature: "chat completions",
-                issue_link: "https://github.com/orgs/meilisearch/discussions/835",
-            }
-            .into())
-        }
-    }
 }
 
 impl FeatureData {
@@ -152,7 +126,7 @@ impl FeatureData {
     }
 
     pub fn new(
-        env: &Env<WithoutTls>,
+        env: &Env,
         wtxn: &mut RwTxn,
         instance_features: InstanceTogglableFeatures,
     ) -> Result<Self> {
@@ -304,8 +304,7 @@ fn create_or_open_index(
     map_size: usize,
     creation: bool,
 ) -> Result<Index> {
-    let options = EnvOpenOptions::new();
-    let mut options = options.read_txn_without_tls();
+    let mut options = EnvOpenOptions::new();
     options.map_size(clamp_to_page_size(map_size));
 
     // You can find more details about this experimental
@@ -334,7 +333,7 @@ fn create_or_open_index(
 #[cfg(test)]
 mod tests {
 
-    use meilisearch_types::heed::{Env, WithoutTls};
+    use meilisearch_types::heed::Env;
     use meilisearch_types::Index;
     use uuid::Uuid;
 
@@ -344,7 +343,7 @@ mod tests {
     use crate::IndexScheduler;
 
     impl IndexMapper {
-        fn test() -> (Self, Env<WithoutTls>, IndexSchedulerHandle) {
+        fn test() -> (Self, Env, IndexSchedulerHandle) {
            let (index_scheduler, handle) = IndexScheduler::test(true, vec![]);
            (index_scheduler.index_mapper, index_scheduler.env, handle)
        }
@ -4,10 +4,9 @@ use std::time::Duration;
|
|||||||
use std::{fs, thread};
|
use std::{fs, thread};
|
||||||
|
|
||||||
use meilisearch_types::heed::types::{SerdeJson, Str};
|
use meilisearch_types::heed::types::{SerdeJson, Str};
|
||||||
use meilisearch_types::heed::{Database, Env, RoTxn, RwTxn, WithoutTls};
|
use meilisearch_types::heed::{Database, Env, RoTxn, RwTxn};
|
||||||
use meilisearch_types::milli;
|
use meilisearch_types::milli;
|
||||||
use meilisearch_types::milli::database_stats::DatabaseStats;
|
use meilisearch_types::milli::database_stats::DatabaseStats;
|
||||||
use meilisearch_types::milli::index::RollbackOutcome;
|
|
||||||
use meilisearch_types::milli::update::IndexerConfig;
|
use meilisearch_types::milli::update::IndexerConfig;
|
||||||
use meilisearch_types::milli::{FieldDistribution, Index};
|
use meilisearch_types::milli::{FieldDistribution, Index};
|
||||||
use serde::{Deserialize, Serialize};
|
use serde::{Deserialize, Serialize};
|
||||||
@ -165,7 +164,7 @@ impl IndexMapper {
|
|||||||
}
|
}
|
||||||
|
|
||||||
pub fn new(
|
pub fn new(
|
||||||
env: &Env<WithoutTls>,
|
env: &Env,
|
||||||
wtxn: &mut RwTxn,
|
wtxn: &mut RwTxn,
|
||||||
options: &IndexSchedulerOptions,
|
options: &IndexSchedulerOptions,
|
||||||
budget: IndexBudget,
|
budget: IndexBudget,
|
||||||
@ -432,51 +431,6 @@ impl IndexMapper {
|
|||||||
Ok(index)
|
Ok(index)
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn rollback_index(
|
|
||||||
&self,
|
|
||||||
rtxn: &RoTxn,
|
|
||||||
name: &str,
|
|
||||||
to: (u32, u32, u32),
|
|
||||||
) -> Result<RollbackOutcome> {
|
|
||||||
// remove any currently updating index to make sure that we aren't keeping a reference to the index somewhere
|
|
||||||
drop(self.currently_updating_index.write().unwrap().take());
|
|
||||||
|
|
||||||
let uuid = self
|
|
||||||
.index_mapping
|
|
||||||
.get(rtxn, name)?
|
|
||||||
.ok_or_else(|| Error::IndexNotFound(name.to_string()))?;
|
|
||||||
|
|
||||||
// take the lock to make sure noone is messing with the indexes while we rollback
|
|
||||||
// this will block any search or other operation, but we are rollbacking so this is probably acceptable.
|
|
||||||
let mut index_map = self.index_map.write().unwrap();
|
|
||||||
|
|
||||||
'close_index: loop {
|
|
||||||
match index_map.get(&uuid) {
|
|
||||||
Available(_) => {
|
|
||||||
index_map.close_for_resize(&uuid, self.enable_mdb_writemap, 0);
|
|
||||||
// index should now be `Closing`; try again
|
|
||||||
continue;
|
|
||||||
}
|
|
||||||
// index already closed
|
|
||||||
Missing => break 'close_index,
|
|
||||||
// closing requested by this thread or another one; wait for closing to complete, then exit
|
|
||||||
Closing(closing_index) => {
|
|
||||||
if closing_index.wait_timeout(Duration::from_secs(100)).is_none() {
|
|
||||||
// release the lock so it doesn't get poisoned
|
|
||||||
drop(index_map);
|
|
||||||
panic!("cannot close index")
|
|
||||||
}
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
BeingDeleted => return Err(Error::IndexNotFound(name.to_string())),
|
|
||||||
};
|
|
||||||
}
|
|
||||||
|
|
||||||
let index_path = self.base_path.join(uuid.to_string());
|
|
||||||
Index::rollback(milli::heed::EnvOpenOptions::new().read_txn_without_tls(), index_path, to)
|
|
||||||
.map_err(|err| crate::Error::from_milli(err, Some(name.to_string())))
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Attempts `f` for each index that exists in the index mapper.
|
/// Attempts `f` for each index that exists in the index mapper.
|
||||||
///
|
///
|
||||||
/// It is preferable to use this function rather than a loop that opens all indexes, as a way to avoid having all indexes opened,
|
/// It is preferable to use this function rather than a loop that opens all indexes, as a way to avoid having all indexes opened,
|
||||||
@@ -1,7 +1,7 @@
 use std::collections::BTreeSet;
 use std::fmt::Write;
 
-use meilisearch_types::batches::{Batch, BatchEnqueuedAt, BatchStats};
+use meilisearch_types::batches::{Batch, BatchEnqueuedAt};
 use meilisearch_types::heed::types::{SerdeBincode, SerdeJson, Str};
 use meilisearch_types::heed::{Database, RoTxn};
 use meilisearch_types::milli::{CboRoaringBitmapCodec, RoaringBitmapCodec, BEU32};
@@ -34,7 +34,6 @@ pub fn snapshot_index_scheduler(scheduler: &IndexScheduler) -> String {
         planned_failures: _,
         run_loop_iteration: _,
         embedders: _,
-        chat_settings: _,
     } = scheduler;
 
     let rtxn = env.read_txn().unwrap();
@ -42,8 +41,11 @@ pub fn snapshot_index_scheduler(scheduler: &IndexScheduler) -> String {
|
|||||||
let mut snap = String::new();
|
let mut snap = String::new();
|
||||||
|
|
||||||
let indx_sched_version = version.get_version(&rtxn).unwrap();
|
let indx_sched_version = version.get_version(&rtxn).unwrap();
|
||||||
let latest_version =
|
let latest_version = (
|
||||||
(versioning::VERSION_MAJOR, versioning::VERSION_MINOR, versioning::VERSION_PATCH);
|
versioning::VERSION_MAJOR.parse().unwrap(),
|
||||||
|
versioning::VERSION_MINOR.parse().unwrap(),
|
||||||
|
versioning::VERSION_PATCH.parse().unwrap(),
|
||||||
|
);
|
||||||
if indx_sched_version != Some(latest_version) {
|
if indx_sched_version != Some(latest_version) {
|
||||||
snap.push_str(&format!("index scheduler running on version {indx_sched_version:?}\n"));
|
snap.push_str(&format!("index scheduler running on version {indx_sched_version:?}\n"));
|
||||||
}
|
}
|
||||||
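The hunk above tracks a type change in the version constants: on the left they are already numeric, on the right they are strings that each `parse()` into a component of the `(u32, u32, u32)` tuple. A std-only sketch of the string variant (the constant values here are placeholders, not the real ones):

```rust
// Placeholder values; the real constants live in the versioning module.
const VERSION_MAJOR: &str = "1";
const VERSION_MINOR: &str = "14";
const VERSION_PATCH: &str = "0";

fn main() {
    // Each `parse()` is inferred to `u32` from the tuple annotation and
    // panics on a malformed constant, exactly like the snapshot helper.
    let latest_version: (u32, u32, u32) = (
        VERSION_MAJOR.parse().unwrap(),
        VERSION_MINOR.parse().unwrap(),
        VERSION_PATCH.parse().unwrap(),
    );
    assert_eq!(latest_version, (1, 14, 0));
}
```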
@ -339,22 +341,7 @@ pub fn snapshot_canceled_by(rtxn: &RoTxn, db: Database<BEU32, RoaringBitmapCodec
|
|||||||
|
|
||||||
pub fn snapshot_batch(batch: &Batch) -> String {
|
pub fn snapshot_batch(batch: &Batch) -> String {
|
||||||
let mut snap = String::new();
|
let mut snap = String::new();
|
||||||
let Batch {
|
let Batch { uid, details, stats, started_at, finished_at, progress: _, enqueued_at } = batch;
|
||||||
uid,
|
|
||||||
details,
|
|
||||||
stats,
|
|
||||||
started_at,
|
|
||||||
finished_at,
|
|
||||||
progress: _,
|
|
||||||
enqueued_at,
|
|
||||||
stop_reason,
|
|
||||||
} = batch;
|
|
||||||
let stats = BatchStats {
|
|
||||||
progress_trace: Default::default(),
|
|
||||||
internal_database_sizes: Default::default(),
|
|
||||||
write_channel_congestion: None,
|
|
||||||
..stats.clone()
|
|
||||||
};
|
|
||||||
if let Some(finished_at) = finished_at {
|
if let Some(finished_at) = finished_at {
|
||||||
assert!(finished_at > started_at);
|
assert!(finished_at > started_at);
|
||||||
}
|
}
|
||||||
@ -365,8 +352,7 @@ pub fn snapshot_batch(batch: &Batch) -> String {
|
|||||||
snap.push('{');
|
snap.push('{');
|
||||||
snap.push_str(&format!("uid: {uid}, "));
|
snap.push_str(&format!("uid: {uid}, "));
|
||||||
snap.push_str(&format!("details: {}, ", serde_json::to_string(details).unwrap()));
|
snap.push_str(&format!("details: {}, ", serde_json::to_string(details).unwrap()));
|
||||||
snap.push_str(&format!("stats: {}, ", serde_json::to_string(&stats).unwrap()));
|
snap.push_str(&format!("stats: {}, ", serde_json::to_string(stats).unwrap()));
|
||||||
snap.push_str(&format!("stop reason: {}, ", serde_json::to_string(&stop_reason).unwrap()));
|
|
||||||
snap.push('}');
|
snap.push('}');
|
||||||
snap
|
snap
|
||||||
}
|
}
|
||||||
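`snapshot_batch` on the left blanks the non-deterministic `BatchStats` fields with struct-update syntax (`..stats.clone()`) so that snapshot tests stay stable across runs. A std-only sketch of the same trick with a toy struct:

```rust
#[derive(Clone, Debug, PartialEq)]
struct Stats {
    total_tasks: u64,
    // Non-deterministic fields we want to blank out in snapshots.
    progress_trace: Vec<String>,
    write_channel_congestion: Option<u64>,
}

fn main() {
    let stats = Stats {
        total_tasks: 3,
        progress_trace: vec!["step1".into()],
        write_channel_congestion: Some(42),
    };

    // Struct-update syntax: override the noisy fields, keep the rest.
    let stable = Stats {
        progress_trace: Default::default(),
        write_channel_congestion: None,
        ..stats.clone()
    };

    assert_eq!(stable.total_tasks, stats.total_tasks);
    assert!(stable.progress_trace.is_empty());
}
```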
@@ -51,12 +51,10 @@ pub use features::RoFeatures;
 use flate2::bufread::GzEncoder;
 use flate2::Compression;
 use meilisearch_types::batches::Batch;
-use meilisearch_types::features::{
-    ChatCompletionSettings, InstanceTogglableFeatures, Network, RuntimeTogglableFeatures,
-};
+use meilisearch_types::features::{InstanceTogglableFeatures, Network, RuntimeTogglableFeatures};
 use meilisearch_types::heed::byteorder::BE;
-use meilisearch_types::heed::types::{DecodeIgnore, SerdeJson, Str, I128};
-use meilisearch_types::heed::{self, Database, Env, RoTxn, WithoutTls};
+use meilisearch_types::heed::types::I128;
+use meilisearch_types::heed::{self, Env, RoTxn};
 use meilisearch_types::milli::index::IndexEmbeddingConfig;
 use meilisearch_types::milli::update::IndexerConfig;
 use meilisearch_types::milli::vector::{Embedder, EmbedderOptions, EmbeddingConfigs};
@@ -76,9 +74,6 @@ use crate::utils::clamp_to_page_size;
 
 pub(crate) type BEI128 = I128<BE>;
 
-const TASK_SCHEDULER_SIZE_THRESHOLD_PERCENT_INT: u64 = 40;
-const CHAT_SETTINGS_DB_NAME: &str = "chat-settings";
-
 #[derive(Debug)]
 pub struct IndexSchedulerOptions {
     /// The path to the version file of Meilisearch.
@@ -130,19 +125,13 @@ pub struct IndexSchedulerOptions {
     pub instance_features: InstanceTogglableFeatures,
     /// The experimental features enabled for this instance.
     pub auto_upgrade: bool,
-    /// The maximal number of entries in the search query cache of an embedder.
-    ///
-    /// 0 disables the cache.
-    pub embedding_cache_cap: usize,
-    /// Snapshot compaction status.
-    pub experimental_no_snapshot_compaction: bool,
 }
 
 /// Structure which holds meilisearch's indexes and schedules the tasks
 /// to be performed on them.
 pub struct IndexScheduler {
     /// The LMDB environment which the DBs are associated with.
-    pub(crate) env: Env<WithoutTls>,
+    pub(crate) env: Env,
 
     /// The list of tasks currently processing
     pub(crate) processing_tasks: Arc<RwLock<ProcessingTasks>>,
@ -156,9 +145,6 @@ pub struct IndexScheduler {
|
|||||||
/// In charge of fetching and setting the status of experimental features.
|
/// In charge of fetching and setting the status of experimental features.
|
||||||
features: features::FeatureData,
|
features: features::FeatureData,
|
||||||
|
|
||||||
/// Stores the custom chat prompts and other settings of the indexes.
|
|
||||||
pub(crate) chat_settings: Database<Str, SerdeJson<ChatCompletionSettings>>,
|
|
||||||
|
|
||||||
/// Everything related to the processing of the tasks
|
/// Everything related to the processing of the tasks
|
||||||
pub scheduler: scheduler::Scheduler,
|
pub scheduler: scheduler::Scheduler,
|
||||||
|
|
||||||
@ -170,11 +156,6 @@ pub struct IndexScheduler {
|
|||||||
/// The Authorization header to send to the webhook URL.
|
/// The Authorization header to send to the webhook URL.
|
||||||
pub(crate) webhook_authorization_header: Option<String>,
|
pub(crate) webhook_authorization_header: Option<String>,
|
||||||
|
|
||||||
/// A map to retrieve the runtime representation of an embedder depending on its configuration.
|
|
||||||
///
|
|
||||||
/// This map may return the same embedder object for two different indexes or embedder settings,
|
|
||||||
/// but it will only do this if the embedder configuration options are the same, leading
|
|
||||||
/// to the same embeddings for the same input text.
|
|
||||||
embedders: Arc<RwLock<HashMap<EmbedderOptions, Arc<Embedder>>>>,
|
embedders: Arc<RwLock<HashMap<EmbedderOptions, Arc<Embedder>>>>,
|
||||||
|
|
||||||
// ================= test
|
// ================= test
|
||||||
@ -217,23 +198,17 @@ impl IndexScheduler {
|
|||||||
#[cfg(test)]
|
#[cfg(test)]
|
||||||
run_loop_iteration: self.run_loop_iteration.clone(),
|
run_loop_iteration: self.run_loop_iteration.clone(),
|
||||||
features: self.features.clone(),
|
features: self.features.clone(),
|
||||||
chat_settings: self.chat_settings,
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
pub(crate) const fn nb_db() -> u32 {
|
pub(crate) const fn nb_db() -> u32 {
|
||||||
Versioning::nb_db()
|
Versioning::nb_db() + Queue::nb_db() + IndexMapper::nb_db() + features::FeatureData::nb_db()
|
||||||
+ Queue::nb_db()
|
|
||||||
+ IndexMapper::nb_db()
|
|
||||||
+ features::FeatureData::nb_db()
|
|
||||||
+ 1 // chat-prompts
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Create an index scheduler and start its run loop.
|
/// Create an index scheduler and start its run loop.
|
||||||
#[allow(private_interfaces)] // because test_utils is private
|
#[allow(private_interfaces)] // because test_utils is private
|
||||||
pub fn new(
|
pub fn new(
|
||||||
options: IndexSchedulerOptions,
|
options: IndexSchedulerOptions,
|
||||||
auth_env: Env<WithoutTls>,
|
|
||||||
from_db_version: (u32, u32, u32),
|
from_db_version: (u32, u32, u32),
|
||||||
#[cfg(test)] test_breakpoint_sdr: crossbeam_channel::Sender<(test_utils::Breakpoint, bool)>,
|
#[cfg(test)] test_breakpoint_sdr: crossbeam_channel::Sender<(test_utils::Breakpoint, bool)>,
|
||||||
#[cfg(test)] planned_failures: Vec<(usize, test_utils::FailureLocation)>,
|
#[cfg(test)] planned_failures: Vec<(usize, test_utils::FailureLocation)>,
|
||||||
@ -265,9 +240,7 @@ impl IndexScheduler {
|
|||||||
};
|
};
|
||||||
|
|
||||||
let env = unsafe {
|
let env = unsafe {
|
||||||
let env_options = heed::EnvOpenOptions::new();
|
heed::EnvOpenOptions::new()
|
||||||
let mut env_options = env_options.read_txn_without_tls();
|
|
||||||
env_options
|
|
||||||
.max_dbs(Self::nb_db())
|
.max_dbs(Self::nb_db())
|
||||||
.map_size(budget.task_db_size)
|
.map_size(budget.task_db_size)
|
||||||
.open(&options.tasks_path)
|
.open(&options.tasks_path)
|
||||||
@ -280,7 +253,6 @@ impl IndexScheduler {
|
|||||||
let features = features::FeatureData::new(&env, &mut wtxn, options.instance_features)?;
|
let features = features::FeatureData::new(&env, &mut wtxn, options.instance_features)?;
|
||||||
let queue = Queue::new(&env, &mut wtxn, &options)?;
|
let queue = Queue::new(&env, &mut wtxn, &options)?;
|
||||||
let index_mapper = IndexMapper::new(&env, &mut wtxn, &options, budget)?;
|
let index_mapper = IndexMapper::new(&env, &mut wtxn, &options, budget)?;
|
||||||
let chat_settings = env.create_database(&mut wtxn, Some(CHAT_SETTINGS_DB_NAME))?;
|
|
||||||
wtxn.commit()?;
|
wtxn.commit()?;
|
||||||
|
|
||||||
// allow unreachable_code to get rids of the warning in the case of a test build.
|
// allow unreachable_code to get rids of the warning in the case of a test build.
|
||||||
@ -288,7 +260,7 @@ impl IndexScheduler {
|
|||||||
processing_tasks: Arc::new(RwLock::new(ProcessingTasks::new())),
|
processing_tasks: Arc::new(RwLock::new(ProcessingTasks::new())),
|
||||||
version,
|
version,
|
||||||
queue,
|
queue,
|
||||||
scheduler: Scheduler::new(&options, auth_env),
|
scheduler: Scheduler::new(&options),
|
||||||
|
|
||||||
index_mapper,
|
index_mapper,
|
||||||
env,
|
env,
|
||||||
@ -304,17 +276,12 @@ impl IndexScheduler {
|
|||||||
#[cfg(test)]
|
#[cfg(test)]
|
||||||
run_loop_iteration: Arc::new(RwLock::new(0)),
|
run_loop_iteration: Arc::new(RwLock::new(0)),
|
||||||
features,
|
features,
|
||||||
chat_settings,
|
|
||||||
};
|
};
|
||||||
|
|
||||||
this.run();
|
this.run();
|
||||||
Ok(this)
|
Ok(this)
|
||||||
}
|
}
|
||||||
|
|
||||||
fn read_txn(&self) -> Result<RoTxn<WithoutTls>> {
|
|
||||||
self.env.read_txn().map_err(|e| e.into())
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Return `Ok(())` if the index scheduler is able to access one of its database.
|
/// Return `Ok(())` if the index scheduler is able to access one of its database.
|
||||||
pub fn health(&self) -> Result<()> {
|
pub fn health(&self) -> Result<()> {
|
||||||
let rtxn = self.env.read_txn()?;
|
let rtxn = self.env.read_txn()?;
|
||||||
@ -391,16 +358,15 @@ impl IndexScheduler {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
pub fn read_txn(&self) -> Result<RoTxn> {
|
||||||
|
self.env.read_txn().map_err(|e| e.into())
|
||||||
|
}
|
||||||
|
|
||||||
/// Start the run loop for the given index scheduler.
|
/// Start the run loop for the given index scheduler.
|
||||||
///
|
///
|
||||||
/// This function will execute in a different thread and must be called
|
/// This function will execute in a different thread and must be called
|
||||||
/// only once per index scheduler.
|
/// only once per index scheduler.
|
||||||
fn run(&self) {
|
fn run(&self) {
|
||||||
// If the number of batched tasks is 0, we don't need to run the scheduler at all.
|
|
||||||
// It will never be able to process any tasks.
|
|
||||||
if self.scheduler.max_number_of_batched_tasks == 0 {
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
let run = self.private_clone();
|
let run = self.private_clone();
|
||||||
std::thread::Builder::new()
|
std::thread::Builder::new()
|
||||||
.name(String::from("scheduler"))
|
.name(String::from("scheduler"))
|
||||||
@ -418,9 +384,9 @@ impl IndexScheduler {
|
|||||||
Ok(Ok(TickOutcome::StopProcessingForever)) => break,
|
Ok(Ok(TickOutcome::StopProcessingForever)) => break,
|
||||||
Ok(Err(e)) => {
|
Ok(Err(e)) => {
|
||||||
tracing::error!("{e}");
|
tracing::error!("{e}");
|
||||||
// Wait when an irrecoverable error occurs.
|
// Wait one second when an irrecoverable error occurs.
|
||||||
if !e.is_recoverable() {
|
if !e.is_recoverable() {
|
||||||
std::thread::sleep(Duration::from_secs(10));
|
std::thread::sleep(Duration::from_secs(1));
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
Err(_panic) => {
|
Err(_panic) => {
|
||||||
@ -447,17 +413,6 @@ impl IndexScheduler {
|
|||||||
Ok(self.env.non_free_pages_size()?)
|
Ok(self.env.non_free_pages_size()?)
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Return the maximum possible database size
|
|
||||||
pub fn max_size(&self) -> Result<u64> {
|
|
||||||
Ok(self.env.info().map_size as u64)
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Return the max size of task allowed until the task queue stop receiving.
|
|
||||||
pub fn remaining_size_until_task_queue_stop(&self) -> Result<u64> {
|
|
||||||
Ok((self.env.info().map_size as u64 * TASK_SCHEDULER_SIZE_THRESHOLD_PERCENT_INT / 100)
|
|
||||||
.saturating_sub(self.used_size()?))
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Return the index corresponding to the name.
|
/// Return the index corresponding to the name.
|
||||||
///
|
///
|
||||||
/// * If the index wasn't opened before, the index will be opened.
|
/// * If the index wasn't opened before, the index will be opened.
|
||||||
@ -472,14 +427,12 @@ impl IndexScheduler {
|
|||||||
/// If you need to fetch information from or perform an action on all indexes,
|
/// If you need to fetch information from or perform an action on all indexes,
|
||||||
/// see the `try_for_each_index` function.
|
/// see the `try_for_each_index` function.
|
||||||
pub fn index(&self, name: &str) -> Result<Index> {
|
pub fn index(&self, name: &str) -> Result<Index> {
|
||||||
let rtxn = self.env.read_txn()?;
|
self.index_mapper.index(&self.env.read_txn()?, name)
|
||||||
self.index_mapper.index(&rtxn, name)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Return the boolean referring if index exists.
|
/// Return the boolean referring if index exists.
|
||||||
pub fn index_exists(&self, name: &str) -> Result<bool> {
|
pub fn index_exists(&self, name: &str) -> Result<bool> {
|
||||||
let rtxn = self.env.read_txn()?;
|
self.index_mapper.index_exists(&self.env.read_txn()?, name)
|
||||||
self.index_mapper.index_exists(&rtxn, name)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Return the name of all indexes without opening them.
|
/// Return the name of all indexes without opening them.
|
||||||
@ -508,7 +461,7 @@ impl IndexScheduler {
|
|||||||
|
|
||||||
/// Returns the total number of indexes available for the specified filter.
|
/// Returns the total number of indexes available for the specified filter.
|
||||||
/// And a `Vec` of the index_uid + its stats
|
/// And a `Vec` of the index_uid + its stats
|
||||||
pub fn paginated_indexes_stats(
|
pub fn get_paginated_indexes_stats(
|
||||||
&self,
|
&self,
|
||||||
filters: &meilisearch_auth::AuthFilter,
|
filters: &meilisearch_auth::AuthFilter,
|
||||||
from: usize,
|
from: usize,
|
||||||
@ -549,31 +502,12 @@ impl IndexScheduler {
|
|||||||
ret.map(|ret| (total, ret))
|
ret.map(|ret| (total, ret))
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Returns the total number of chat workspaces available ~~for the specified filter~~.
|
|
||||||
/// And a `Vec` of the workspace_uids
|
|
||||||
pub fn paginated_chat_workspace_uids(
|
|
||||||
&self,
|
|
||||||
from: usize,
|
|
||||||
limit: usize,
|
|
||||||
) -> Result<(usize, Vec<String>)> {
|
|
||||||
let rtxn = self.read_txn()?;
|
|
||||||
let total = self.chat_settings.len(&rtxn)?;
|
|
||||||
let mut iter = self.chat_settings.iter(&rtxn)?.skip(from);
|
|
||||||
iter.by_ref()
|
|
||||||
.take(limit)
|
|
||||||
.map(|ret| ret.map_err(Error::from))
|
|
||||||
.map(|ret| ret.map(|(uid, _)| uid.to_string()))
|
|
||||||
.collect::<Result<Vec<_>, Error>>()
|
|
||||||
.map(|ret| (total as usize, ret))
|
|
||||||
}
|
|
||||||
|
|
||||||
/// The returned structure contains:
|
/// The returned structure contains:
|
||||||
/// 1. The name of the property being observed can be `statuses`, `types`, or `indexes`.
|
/// 1. The name of the property being observed can be `statuses`, `types`, or `indexes`.
|
||||||
/// 2. The name of the specific data related to the property can be `enqueued` for the `statuses`, `settingsUpdate` for the `types`, or the name of the index for the `indexes`, for example.
|
/// 2. The name of the specific data related to the property can be `enqueued` for the `statuses`, `settingsUpdate` for the `types`, or the name of the index for the `indexes`, for example.
|
||||||
/// 3. The number of times the properties appeared.
|
/// 3. The number of times the properties appeared.
|
||||||
pub fn get_stats(&self) -> Result<BTreeMap<String, BTreeMap<String, u64>>> {
|
pub fn get_stats(&self) -> Result<BTreeMap<String, BTreeMap<String, u64>>> {
|
||||||
let rtxn = self.read_txn()?;
|
self.queue.get_stats(&self.read_txn()?, &self.processing_tasks.read().unwrap())
|
||||||
self.queue.get_stats(&rtxn, &self.processing_tasks.read().unwrap())
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Return true if there is at least one task that is processing.
|
// Return true if there is at least one task that is processing.
|
||||||
@@ -676,10 +610,9 @@ impl IndexScheduler {
         task_id: Option<TaskId>,
         dry_run: bool,
     ) -> Result<Task> {
-        // if the task doesn't delete or cancel anything and 40% of the task queue is full, we must refuse to enqueue the incoming task
-        if !matches!(&kind, KindWithContent::TaskDeletion { tasks, .. } | KindWithContent::TaskCancelation { tasks, .. } if !tasks.is_empty())
-            && (self.env.non_free_pages_size()? * 100) / self.env.info().map_size as u64
-                > TASK_SCHEDULER_SIZE_THRESHOLD_PERCENT_INT
+        // if the task doesn't delete anything and 50% of the task queue is full, we must refuse to enqueue the incomming task
+        if !matches!(&kind, KindWithContent::TaskDeletion { tasks, .. } if !tasks.is_empty())
+            && (self.env.non_free_pages_size()? * 100) / self.env.info().map_size as u64 > 40
         {
             return Err(Error::NoSpaceLeftInTaskQueue);
         }
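Aside: both sides of this hunk gate enqueueing on the same integer percentage arithmetic over the LMDB environment; they differ only in which cleanup tasks are exempt and in whether the threshold is a named constant. A standalone sketch of the computation (function and parameter names are mine, not the scheduler's):

```rust
/// True when used pages exceed `threshold_percent` of the memory map.
fn task_db_is_too_full(non_free_pages_size: u64, map_size: u64, threshold_percent: u64) -> bool {
    (non_free_pages_size * 100) / map_size > threshold_percent
}

fn main() {
    // 45 MiB used out of a 100 MiB map is 45%, above a 40% threshold.
    assert!(task_db_is_too_full(45 << 20, 100 << 20, 40));
    // Exactly 40% is still accepted: the comparison is strict.
    assert!(!task_db_is_too_full(40 << 20, 100 << 20, 40));
}
```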
@@ -748,7 +681,7 @@ impl IndexScheduler {
     written: usize,
 }
 
-impl Read for TaskReader<'_, '_> {
+impl<'a, 'b> Read for TaskReader<'a, 'b> {
     fn read(&mut self, mut buf: &mut [u8]) -> std::io::Result<usize> {
         if self.buffer.is_empty() {
             match self.tasks.next() {
@@ -879,7 +812,7 @@ impl IndexScheduler {
 
             // add missing embedder
             let embedder = Arc::new(
-                Embedder::new(embedder_options.clone(), self.scheduler.embedding_cache_cap)
+                Embedder::new(embedder_options.clone())
                     .map_err(meilisearch_types::milli::vector::Error::from)
                     .map_err(|err| {
                         Error::from_milli(err.into(), Some(index_uid.clone()))
@@ -895,31 +828,6 @@ impl IndexScheduler {
             .collect();
         res.map(EmbeddingConfigs::new)
     }
-
-    pub fn chat_settings(&self, uid: &str) -> Result<Option<ChatCompletionSettings>> {
-        let rtxn = self.env.read_txn()?;
-        self.chat_settings.get(&rtxn, uid).map_err(Into::into)
-    }
-
-    /// Return true if chat workspace exists.
-    pub fn chat_workspace_exists(&self, name: &str) -> Result<bool> {
-        let rtxn = self.env.read_txn()?;
-        Ok(self.chat_settings.remap_data_type::<DecodeIgnore>().get(&rtxn, name)?.is_some())
-    }
-
-    pub fn put_chat_settings(&self, uid: &str, settings: &ChatCompletionSettings) -> Result<()> {
-        let mut wtxn = self.env.write_txn()?;
-        self.chat_settings.put(&mut wtxn, uid, settings)?;
-        wtxn.commit()?;
-        Ok(())
-    }
-
-    pub fn delete_chat_settings(&self, uid: &str) -> Result<bool> {
-        let mut wtxn = self.env.write_txn()?;
-        let deleted = self.chat_settings.delete(&mut wtxn, uid)?;
-        wtxn.commit()?;
-        Ok(deleted)
-    }
 }
 
 /// The outcome of calling the [`IndexScheduler::tick`] function.
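Aside: the four removed helpers all follow the same heed transaction discipline: reads borrow a short-lived read transaction, writes open a write transaction and must commit it. A minimal sketch of the write half, assuming the branch side's `Env` (no type parameter) and a `serde_json::Value` payload standing in for `ChatCompletionSettings`:

```rust
use meilisearch_types::heed::types::{SerdeJson, Str};
use meilisearch_types::heed::{Database, Env, Result};

fn put_settings(
    env: &Env,
    db: Database<Str, SerdeJson<serde_json::Value>>,
    uid: &str,
    settings: &serde_json::Value,
) -> Result<()> {
    let mut wtxn = env.write_txn()?;
    db.put(&mut wtxn, uid, settings)?;
    wtxn.commit() // without this commit the write is rolled back on drop
}
```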
@@ -64,17 +64,9 @@ make_enum_progress! {
     }
 }
 
-make_enum_progress! {
-    pub enum FinalizingIndexStep {
-        Committing,
-        ComputingStats,
-    }
-}
-
 make_enum_progress! {
     pub enum TaskCancelationProgress {
         RetrievingTasks,
-        CancelingUpgrade,
         UpdatingTasks,
     }
 }
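Aside: `make_enum_progress!` comes from milli's progress module and turns an enum of steps into a reportable progress sequence. As a rough, hand-written idea of what an enum-to-step mapping involves (this is not the macro's actual expansion):

```rust
enum FinalizingIndexStep {
    Committing,
    ComputingStats,
}

impl FinalizingIndexStep {
    // Hypothetical helpers; the real macro derives more than this.
    fn name(&self) -> &'static str {
        match self {
            Self::Committing => "committing",
            Self::ComputingStats => "computing stats",
        }
    }

    fn current(&self) -> u32 {
        match self {
            Self::Committing => 0,
            Self::ComputingStats => 1,
        }
    }
}

fn main() {
    let step = FinalizingIndexStep::ComputingStats;
    println!("step {}: {}", step.current(), step.name());
}
```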
@@ -3,7 +3,7 @@ use std::ops::{Bound, RangeBounds};
 
 use meilisearch_types::batches::{Batch, BatchId};
 use meilisearch_types::heed::types::{DecodeIgnore, SerdeBincode, SerdeJson, Str};
-use meilisearch_types::heed::{Database, Env, RoTxn, RwTxn, WithoutTls};
+use meilisearch_types::heed::{Database, Env, RoTxn, RwTxn};
 use meilisearch_types::milli::{CboRoaringBitmapCodec, RoaringBitmapCodec, BEU32};
 use meilisearch_types::tasks::{Kind, Status};
 use roaring::{MultiOps, RoaringBitmap};
@@ -66,7 +66,7 @@ impl BatchQueue {
         NUMBER_OF_DATABASES
     }
 
-    pub(super) fn new(env: &Env<WithoutTls>, wtxn: &mut RwTxn) -> Result<Self> {
+    pub(super) fn new(env: &Env, wtxn: &mut RwTxn) -> Result<Self> {
         Ok(Self {
             all_batches: env.create_database(wtxn, Some(db_name::ALL_BATCHES))?,
             status: env.create_database(wtxn, Some(db_name::BATCH_STATUS))?,
@@ -182,7 +182,6 @@ impl BatchQueue {
                 started_at: batch.started_at,
                 finished_at: batch.finished_at,
                 enqueued_at: batch.enqueued_at,
-                stop_reason: batch.reason.to_string(),
             },
         )?;
 
@@ -106,7 +106,7 @@ fn query_batches_simple() {
     batches[0].enqueued_at = None;
     // Insta cannot snapshot our batches because the batch stats contains an enum as key: https://github.com/mitsuhiko/insta/issues/689
     let batch = serde_json::to_string_pretty(&batches[0]).unwrap();
-    snapshot!(batch, @r###"
+    snapshot!(batch, @r#"
     {
       "uid": 0,
       "details": {
@@ -126,10 +126,9 @@ fn query_batches_simple() {
       },
       "startedAt": "1970-01-01T00:00:00Z",
       "finishedAt": null,
-      "enqueuedAt": null,
-      "stopReason": "created batch containing only task with id 0 of type `indexCreation` that cannot be batched with any other task."
+      "enqueuedAt": null
     }
-    "###);
+    "#);
 
     let query = Query { statuses: Some(vec![Status::Enqueued]), ..Default::default() };
     let (batches, _) = index_scheduler
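Aside: the workaround this test relies on, serialize by hand and snapshot the resulting string, is a generally useful insta pattern whenever a value does not snapshot cleanly on its own. A minimal standalone sketch (assuming the `insta` and `serde_json` crates):

```rust
use std::collections::BTreeMap;

#[test]
fn snapshot_pretty_json() {
    // Serialize to pretty JSON first, then snapshot the string.
    let stats = BTreeMap::from([("indexCreation", 1_u64)]);
    let pretty = serde_json::to_string_pretty(&stats).unwrap();
    insta::assert_snapshot!(pretty);
}
```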
@@ -13,7 +13,7 @@ use std::time::Duration;
 
 use file_store::FileStore;
 use meilisearch_types::batches::BatchId;
-use meilisearch_types::heed::{Database, Env, RoTxn, RwTxn, WithoutTls};
+use meilisearch_types::heed::{Database, Env, RoTxn, RwTxn};
 use meilisearch_types::milli::{CboRoaringBitmapCodec, BEU32};
 use meilisearch_types::tasks::{Kind, KindWithContent, Status, Task};
 use roaring::RoaringBitmap;
@@ -157,7 +157,7 @@ impl Queue {
 
     /// Create an index scheduler and start its run loop.
     pub(crate) fn new(
-        env: &Env<WithoutTls>,
+        env: &Env,
         wtxn: &mut RwTxn,
         options: &IndexSchedulerOptions,
     ) -> Result<Self> {
@@ -292,6 +292,8 @@ impl Queue {
             return Ok(task);
         }
 
+        // Get rid of the mutability.
+        let task = task;
         self.tasks.register(wtxn, &task)?;
 
         Ok(task)
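Aside: the two added lines in the last hunk use a small Rust idiom worth naming: re-binding a `mut` variable to the same name without `mut`, so the remainder of the function cannot mutate it. A tiny illustration:

```rust
fn main() {
    let mut task = String::from("draft");
    task.push_str(" v2"); // mutation is still allowed here

    // Get rid of the mutability: same name, same value, now immutable.
    let task = task;
    // task.push_str("!"); // would no longer compile
    println!("{task}");
}
```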
@@ -1,5 +1,6 @@
 ---
 source: crates/index-scheduler/src/queue/batches_test.rs
+snapshot_kind: text
 ---
 ### Autobatching Enabled = true
 ### Processing batch None:
@@ -48,8 +49,8 @@ catto: { number_of_documents: 0, field_distribution: {} }
 [timestamp] [1,2,3,]
 ----------------------------------------------------------------------
 ### All Batches:
-0 {uid: 0, details: {"primaryKey":"mouse"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"indexCreation":1},"indexUids":{"catto":1}}, stop reason: "created batch containing only task with id 0 of type `indexCreation` that cannot be batched with any other task.", }
-1 {uid: 1, details: {"primaryKey":"sheep","matchedTasks":3,"canceledTasks":2,"originalFilter":"test_query","swaps":[{"indexes":["catto","doggo"]}]}, stats: {"totalNbTasks":3,"status":{"succeeded":1,"canceled":2},"types":{"indexCreation":1,"indexSwap":1,"taskCancelation":1},"indexUids":{"doggo":1}}, stop reason: "created batch containing only task with id 3 of type `taskCancelation` that cannot be batched with any other task.", }
+0 {uid: 0, details: {"primaryKey":"mouse"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"indexCreation":1},"indexUids":{"catto":1}}, }
+1 {uid: 1, details: {"primaryKey":"sheep","matchedTasks":3,"canceledTasks":2,"originalFilter":"test_query","swaps":[{"indexes":["catto","doggo"]}]}, stats: {"totalNbTasks":3,"status":{"succeeded":1,"canceled":2},"types":{"indexCreation":1,"indexSwap":1,"taskCancelation":1},"indexUids":{"doggo":1}}, }
 ----------------------------------------------------------------------
 ### Batch to tasks mapping:
 0 [0,]
@@ -1,5 +1,6 @@
 ---
 source: crates/index-scheduler/src/queue/batches_test.rs
+snapshot_kind: text
 ---
 ### Autobatching Enabled = true
 ### Processing batch None:
@@ -47,9 +48,9 @@ whalo: { number_of_documents: 0, field_distribution: {} }
 [timestamp] [2,]
 ----------------------------------------------------------------------
 ### All Batches:
-0 {uid: 0, details: {"primaryKey":"bone"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"indexCreation":1},"indexUids":{"doggo":1}}, stop reason: "created batch containing only task with id 0 of type `indexCreation` that cannot be batched with any other task.", }
-1 {uid: 1, details: {"primaryKey":"plankton"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"indexCreation":1},"indexUids":{"whalo":1}}, stop reason: "created batch containing only task with id 1 of type `indexCreation` that cannot be batched with any other task.", }
-2 {uid: 2, details: {"primaryKey":"his_own_vomit"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"indexCreation":1},"indexUids":{"catto":1}}, stop reason: "created batch containing only task with id 2 of type `indexCreation` that cannot be batched with any other task.", }
+0 {uid: 0, details: {"primaryKey":"bone"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"indexCreation":1},"indexUids":{"doggo":1}}, }
+1 {uid: 1, details: {"primaryKey":"plankton"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"indexCreation":1},"indexUids":{"whalo":1}}, }
+2 {uid: 2, details: {"primaryKey":"his_own_vomit"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"indexCreation":1},"indexUids":{"catto":1}}, }
 ----------------------------------------------------------------------
 ### Batch to tasks mapping:
 0 [0,]
@@ -1,10 +1,11 @@
 ---
 source: crates/index-scheduler/src/queue/batches_test.rs
+snapshot_kind: text
 ---
 ### Autobatching Enabled = true
 ### Processing batch Some(1):
 [1,]
-{uid: 1, details: {"primaryKey":"sheep"}, stats: {"totalNbTasks":1,"status":{"processing":1},"types":{"indexCreation":1},"indexUids":{"doggo":1}}, stop reason: "created batch containing only task with id 1 of type `indexCreation` that cannot be batched with any other task.", }
+{uid: 1, details: {"primaryKey":"sheep"}, stats: {"totalNbTasks":1,"status":{"processing":1},"types":{"indexCreation":1},"indexUids":{"doggo":1}}, }
 ----------------------------------------------------------------------
 ### All Tasks:
 0 {uid: 0, batch_uid: 0, status: succeeded, details: { primary_key: Some("mouse") }, kind: IndexCreation { index_uid: "catto", primary_key: Some("mouse") }}
@@ -42,7 +43,7 @@ catto: { number_of_documents: 0, field_distribution: {} }
 [timestamp] [0,]
 ----------------------------------------------------------------------
 ### All Batches:
-0 {uid: 0, details: {"primaryKey":"mouse"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"indexCreation":1},"indexUids":{"catto":1}}, stop reason: "created batch containing only task with id 0 of type `indexCreation` that cannot be batched with any other task.", }
+0 {uid: 0, details: {"primaryKey":"mouse"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"indexCreation":1},"indexUids":{"catto":1}}, }
 ----------------------------------------------------------------------
 ### Batch to tasks mapping:
 0 [0,]
@@ -1,5 +1,6 @@
 ---
 source: crates/index-scheduler/src/queue/batches_test.rs
+snapshot_kind: text
 ---
 ### Autobatching Enabled = true
 ### Processing batch None:
@@ -47,9 +48,9 @@ doggo: { number_of_documents: 0, field_distribution: {} }
 [timestamp] [2,]
 ----------------------------------------------------------------------
 ### All Batches:
-0 {uid: 0, details: {"primaryKey":"mouse"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"indexCreation":1},"indexUids":{"catto":1}}, stop reason: "created batch containing only task with id 0 of type `indexCreation` that cannot be batched with any other task.", }
-1 {uid: 1, details: {"primaryKey":"sheep"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"indexCreation":1},"indexUids":{"doggo":1}}, stop reason: "created batch containing only task with id 1 of type `indexCreation` that cannot be batched with any other task.", }
-2 {uid: 2, details: {"primaryKey":"fish"}, stats: {"totalNbTasks":1,"status":{"failed":1},"types":{"indexCreation":1},"indexUids":{"whalo":1}}, stop reason: "created batch containing only task with id 2 of type `indexCreation` that cannot be batched with any other task.", }
+0 {uid: 0, details: {"primaryKey":"mouse"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"indexCreation":1},"indexUids":{"catto":1}}, }
+1 {uid: 1, details: {"primaryKey":"sheep"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"indexCreation":1},"indexUids":{"doggo":1}}, }
+2 {uid: 2, details: {"primaryKey":"fish"}, stats: {"totalNbTasks":1,"status":{"failed":1},"types":{"indexCreation":1},"indexUids":{"whalo":1}}, }
 ----------------------------------------------------------------------
 ### Batch to tasks mapping:
 0 [0,]
@@ -1,5 +1,6 @@
 ---
 source: crates/index-scheduler/src/queue/batches_test.rs
+snapshot_kind: text
 ---
 ### Autobatching Enabled = true
 ### Processing batch None:
@@ -52,10 +53,10 @@ doggo: { number_of_documents: 0, field_distribution: {} }
 [timestamp] [3,]
 ----------------------------------------------------------------------
 ### All Batches:
-0 {uid: 0, details: {"primaryKey":"mouse"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"indexCreation":1},"indexUids":{"catto":1}}, stop reason: "created batch containing only task with id 0 of type `indexCreation` that cannot be batched with any other task.", }
-1 {uid: 1, details: {"primaryKey":"sheep"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"indexCreation":1},"indexUids":{"doggo":1}}, stop reason: "created batch containing only task with id 1 of type `indexCreation` that cannot be batched with any other task.", }
-2 {uid: 2, details: {"swaps":[{"indexes":["catto","doggo"]}]}, stats: {"totalNbTasks":1,"status":{"failed":1},"types":{"indexSwap":1},"indexUids":{}}, stop reason: "created batch containing only task with id 2 of type `indexSwap` that cannot be batched with any other task.", }
-3 {uid: 3, details: {"swaps":[{"indexes":["catto","whalo"]}]}, stats: {"totalNbTasks":1,"status":{"failed":1},"types":{"indexSwap":1},"indexUids":{}}, stop reason: "created batch containing only task with id 3 of type `indexSwap` that cannot be batched with any other task.", }
+0 {uid: 0, details: {"primaryKey":"mouse"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"indexCreation":1},"indexUids":{"catto":1}}, }
+1 {uid: 1, details: {"primaryKey":"sheep"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"indexCreation":1},"indexUids":{"doggo":1}}, }
+2 {uid: 2, details: {"swaps":[{"indexes":["catto","doggo"]}]}, stats: {"totalNbTasks":1,"status":{"failed":1},"types":{"indexSwap":1},"indexUids":{}}, }
+3 {uid: 3, details: {"swaps":[{"indexes":["catto","whalo"]}]}, stats: {"totalNbTasks":1,"status":{"failed":1},"types":{"indexSwap":1},"indexUids":{}}, }
 ----------------------------------------------------------------------
 ### Batch to tasks mapping:
 0 [0,]
@@ -1,5 +1,6 @@
 ---
 source: crates/index-scheduler/src/queue/tasks_test.rs
+snapshot_kind: text
 ---
 ### Autobatching Enabled = true
 ### Processing batch None:
@@ -48,8 +49,8 @@ catto: { number_of_documents: 0, field_distribution: {} }
 [timestamp] [1,2,3,]
 ----------------------------------------------------------------------
 ### All Batches:
-0 {uid: 0, details: {"primaryKey":"mouse"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"indexCreation":1},"indexUids":{"catto":1}}, stop reason: "created batch containing only task with id 0 of type `indexCreation` that cannot be batched with any other task.", }
-1 {uid: 1, details: {"primaryKey":"sheep","matchedTasks":3,"canceledTasks":2,"originalFilter":"test_query","swaps":[{"indexes":["catto","doggo"]}]}, stats: {"totalNbTasks":3,"status":{"succeeded":1,"canceled":2},"types":{"indexCreation":1,"indexSwap":1,"taskCancelation":1},"indexUids":{"doggo":1}}, stop reason: "created batch containing only task with id 3 of type `taskCancelation` that cannot be batched with any other task.", }
+0 {uid: 0, details: {"primaryKey":"mouse"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"indexCreation":1},"indexUids":{"catto":1}}, }
+1 {uid: 1, details: {"primaryKey":"sheep","matchedTasks":3,"canceledTasks":2,"originalFilter":"test_query","swaps":[{"indexes":["catto","doggo"]}]}, stats: {"totalNbTasks":3,"status":{"succeeded":1,"canceled":2},"types":{"indexCreation":1,"indexSwap":1,"taskCancelation":1},"indexUids":{"doggo":1}}, }
 ----------------------------------------------------------------------
 ### Batch to tasks mapping:
 0 [0,]
@@ -1,5 +1,6 @@
 ---
 source: crates/index-scheduler/src/queue/tasks_test.rs
+snapshot_kind: text
 ---
 ### Autobatching Enabled = true
 ### Processing batch None:
@@ -47,9 +48,9 @@ whalo: { number_of_documents: 0, field_distribution: {} }
 [timestamp] [2,]
 ----------------------------------------------------------------------
 ### All Batches:
-0 {uid: 0, details: {"primaryKey":"bone"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"indexCreation":1},"indexUids":{"doggo":1}}, stop reason: "created batch containing only task with id 0 of type `indexCreation` that cannot be batched with any other task.", }
-1 {uid: 1, details: {"primaryKey":"plankton"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"indexCreation":1},"indexUids":{"whalo":1}}, stop reason: "created batch containing only task with id 1 of type `indexCreation` that cannot be batched with any other task.", }
-2 {uid: 2, details: {"primaryKey":"his_own_vomit"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"indexCreation":1},"indexUids":{"catto":1}}, stop reason: "created batch containing only task with id 2 of type `indexCreation` that cannot be batched with any other task.", }
+0 {uid: 0, details: {"primaryKey":"bone"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"indexCreation":1},"indexUids":{"doggo":1}}, }
+1 {uid: 1, details: {"primaryKey":"plankton"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"indexCreation":1},"indexUids":{"whalo":1}}, }
+2 {uid: 2, details: {"primaryKey":"his_own_vomit"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"indexCreation":1},"indexUids":{"catto":1}}, }
 ----------------------------------------------------------------------
 ### Batch to tasks mapping:
 0 [0,]
@@ -1,5 +1,6 @@
 ---
 source: crates/index-scheduler/src/queue/tasks_test.rs
+snapshot_kind: text
 ---
 ### Autobatching Enabled = true
 ### Processing batch None:
@@ -47,9 +48,9 @@ doggo: { number_of_documents: 0, field_distribution: {} }
 [timestamp] [2,]
 ----------------------------------------------------------------------
 ### All Batches:
-0 {uid: 0, details: {"primaryKey":"mouse"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"indexCreation":1},"indexUids":{"catto":1}}, stop reason: "created batch containing only task with id 0 of type `indexCreation` that cannot be batched with any other task.", }
-1 {uid: 1, details: {"primaryKey":"sheep"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"indexCreation":1},"indexUids":{"doggo":1}}, stop reason: "created batch containing only task with id 1 of type `indexCreation` that cannot be batched with any other task.", }
-2 {uid: 2, details: {"primaryKey":"fish"}, stats: {"totalNbTasks":1,"status":{"failed":1},"types":{"indexCreation":1},"indexUids":{"whalo":1}}, stop reason: "created batch containing only task with id 2 of type `indexCreation` that cannot be batched with any other task.", }
+0 {uid: 0, details: {"primaryKey":"mouse"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"indexCreation":1},"indexUids":{"catto":1}}, }
+1 {uid: 1, details: {"primaryKey":"sheep"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"indexCreation":1},"indexUids":{"doggo":1}}, }
+2 {uid: 2, details: {"primaryKey":"fish"}, stats: {"totalNbTasks":1,"status":{"failed":1},"types":{"indexCreation":1},"indexUids":{"whalo":1}}, }
 ----------------------------------------------------------------------
 ### Batch to tasks mapping:
 0 [0,]
@@ -1,7 +1,7 @@
 use std::ops::{Bound, RangeBounds};
 
 use meilisearch_types::heed::types::{DecodeIgnore, SerdeBincode, SerdeJson, Str};
-use meilisearch_types::heed::{Database, Env, RoTxn, RwTxn, WithoutTls};
+use meilisearch_types::heed::{Database, Env, RoTxn, RwTxn};
 use meilisearch_types::milli::{CboRoaringBitmapCodec, RoaringBitmapCodec, BEU32};
 use meilisearch_types::tasks::{Kind, Status, Task};
 use roaring::{MultiOps, RoaringBitmap};
@@ -68,7 +68,7 @@ impl TaskQueue {
         NUMBER_OF_DATABASES
     }
 
-    pub(crate) fn new(env: &Env<WithoutTls>, wtxn: &mut RwTxn) -> Result<Self> {
+    pub(crate) fn new(env: &Env, wtxn: &mut RwTxn) -> Result<Self> {
         Ok(Self {
             all_tasks: env.create_database(wtxn, Some(db_name::ALL_TASKS))?,
             status: env.create_database(wtxn, Some(db_name::STATUS))?,
@@ -315,7 +315,7 @@ impl Queue {
         if let Some(batch_uids) = batch_uids {
             let mut batch_tasks = RoaringBitmap::new();
             for batch_uid in batch_uids {
-                if processing_batch.as_ref().is_some_and(|batch| batch.uid == *batch_uid) {
+                if processing_batch.as_ref().map_or(false, |batch| batch.uid == *batch_uid) {
                    batch_tasks |= &**processing_tasks;
                 } else {
                    batch_tasks |= self.tasks_in_batch(rtxn, *batch_uid)?;
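Aside: the last hunk swaps `Option::is_some_and(f)` (left, stabilized in Rust 1.70) for the older `Option::map_or(false, f)` spelling (right). The two are equivalent:

```rust
fn main() {
    let uid: Option<u32> = Some(7);
    assert_eq!(uid.is_some_and(|n| n == 7), uid.map_or(false, |n| n == 7));

    let missing: Option<u32> = None;
    assert!(!missing.is_some_and(|n| n == 7));
}
```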
@@ -364,7 +364,7 @@ fn test_task_queue_is_full() {
     // we won't be able to test this error in an integration test thus as a best effort test I still ensure the error return the expected error code
     snapshot!(format!("{:?}", result.error_code()), @"NoSpaceLeftOnDevice");
 
-    // Even the task deletion and cancelation that don't delete anything should be refused
+    // Even the task deletion that doesn't delete anything shouldn't be accepted
     let result = index_scheduler
         .register(
             KindWithContent::TaskDeletion { query: S("test"), tasks: RoaringBitmap::new() },
@@ -373,39 +373,10 @@ fn test_task_queue_is_full() {
         )
         .unwrap_err();
     snapshot!(result, @"Meilisearch cannot receive write operations because the limit of the task database has been reached. Please delete tasks to continue performing write operations.");
-    let result = index_scheduler
-        .register(
-            KindWithContent::TaskCancelation { query: S("test"), tasks: RoaringBitmap::new() },
-            None,
-            false,
-        )
-        .unwrap_err();
-    snapshot!(result, @"Meilisearch cannot receive write operations because the limit of the task database has been reached. Please delete tasks to continue performing write operations.");
 
     // we won't be able to test this error in an integration test thus as a best effort test I still ensure the error return the expected error code
     snapshot!(format!("{:?}", result.error_code()), @"NoSpaceLeftOnDevice");
 
-    // But a task cancelation that cancel something should work
-    index_scheduler
-        .register(
-            KindWithContent::TaskCancelation { query: S("test"), tasks: (0..100).collect() },
-            None,
-            false,
-        )
-        .unwrap();
-    handle.advance_one_successful_batch();
-
-    // But we should still be forbidden from enqueuing new tasks
-    let result = index_scheduler
-        .register(
-            KindWithContent::IndexCreation { index_uid: S("doggo"), primary_key: None },
-            None,
-            false,
-        )
-        .unwrap_err();
-    snapshot!(result, @"Meilisearch cannot receive write operations because the limit of the task database has been reached. Please delete tasks to continue performing write operations.");
-
-    // And a task deletion that delete something should works
+    // But a task deletion that delete something should works
     index_scheduler
         .register(
             KindWithContent::TaskDeletion { query: S("test"), tasks: (0..100).collect() },
@@ -7,7 +7,10 @@ The main function of the autobatcher is [`next_autobatch`].
 
 use std::ops::ControlFlow::{self, Break, Continue};
 
-use meilisearch_types::tasks::{BatchStopReason, PrimaryKeyMismatchReason, TaskId};
+use meilisearch_types::milli::update::IndexDocumentsMethod::{
+    self, ReplaceDocuments, UpdateDocuments,
+};
+use meilisearch_types::tasks::TaskId;
 
 use crate::KindWithContent;
 
@@ -16,11 +19,19 @@ use crate::KindWithContent;
 ///
 /// Only the non-prioritised tasks that can be grouped in a batch have a corresponding [`AutobatchKind`]
 enum AutobatchKind {
-    DocumentImport { allow_index_creation: bool, primary_key: Option<String> },
+    DocumentImport {
+        method: IndexDocumentsMethod,
+        allow_index_creation: bool,
+        primary_key: Option<String>,
+    },
     DocumentEdition,
-    DocumentDeletion { by_filter: bool },
+    DocumentDeletion {
+        by_filter: bool,
+    },
     DocumentClear,
-    Settings { allow_index_creation: bool },
+    Settings {
+        allow_index_creation: bool,
+    },
     IndexCreation,
     IndexDeletion,
     IndexUpdate,
@@ -49,8 +60,11 @@ impl From<KindWithContent> for AutobatchKind {
     fn from(kind: KindWithContent) -> Self {
         match kind {
             KindWithContent::DocumentAdditionOrUpdate {
-                allow_index_creation, primary_key, ..
-            } => AutobatchKind::DocumentImport { allow_index_creation, primary_key },
+                method,
+                allow_index_creation,
+                primary_key,
+                ..
+            } => AutobatchKind::DocumentImport { method, allow_index_creation, primary_key },
             KindWithContent::DocumentEdition { .. } => AutobatchKind::DocumentEdition,
             KindWithContent::DocumentDeletion { .. } => {
                 AutobatchKind::DocumentDeletion { by_filter: false }
@@ -85,6 +99,7 @@ pub enum BatchKind {
         ids: Vec<TaskId>,
     },
     DocumentOperation {
+        method: IndexDocumentsMethod,
         allow_index_creation: bool,
         primary_key: Option<String>,
         operation_ids: Vec<TaskId>,
@@ -146,48 +161,23 @@ impl BatchKind {
     // TODO use an AutoBatchKind as input
     pub fn new(
         task_id: TaskId,
-        kind_with_content: KindWithContent,
+        kind: KindWithContent,
         primary_key: Option<&str>,
-    ) -> (ControlFlow<(BatchKind, BatchStopReason), BatchKind>, bool) {
+    ) -> (ControlFlow<BatchKind, BatchKind>, bool) {
         use AutobatchKind as K;
 
-        let kind = kind_with_content.as_kind();
-
-        match AutobatchKind::from(kind_with_content) {
-            K::IndexCreation => (
-                Break((
-                    BatchKind::IndexCreation { id: task_id },
-                    BatchStopReason::TaskCannotBeBatched { kind, id: task_id },
-                )),
-                true,
-            ),
-            K::IndexDeletion => (
-                Break((
-                    BatchKind::IndexDeletion { ids: vec![task_id] },
-                    BatchStopReason::IndexDeletion { id: task_id },
-                )),
-                false,
-            ),
-            K::IndexUpdate => (
-                Break((
-                    BatchKind::IndexUpdate { id: task_id },
-                    BatchStopReason::TaskCannotBeBatched { kind, id: task_id },
-                )),
-                false,
-            ),
-            K::IndexSwap => (
-                Break((
-                    BatchKind::IndexSwap { id: task_id },
-                    BatchStopReason::TaskCannotBeBatched { kind, id: task_id },
-                )),
-                false,
-            ),
+        match AutobatchKind::from(kind) {
+            K::IndexCreation => (Break(BatchKind::IndexCreation { id: task_id }), true),
+            K::IndexDeletion => (Break(BatchKind::IndexDeletion { ids: vec![task_id] }), false),
+            K::IndexUpdate => (Break(BatchKind::IndexUpdate { id: task_id }), false),
+            K::IndexSwap => (Break(BatchKind::IndexSwap { id: task_id }), false),
             K::DocumentClear => (Continue(BatchKind::DocumentClear { ids: vec![task_id] }), false),
-            K::DocumentImport { allow_index_creation, primary_key: pk }
+            K::DocumentImport { method, allow_index_creation, primary_key: pk }
                 if primary_key.is_none() || pk.is_none() || primary_key == pk.as_deref() =>
             {
                 (
                     Continue(BatchKind::DocumentOperation {
+                        method,
                         allow_index_creation,
                         primary_key: pk,
                         operation_ids: vec![task_id],
|
|||||||
)
|
)
|
||||||
}
|
}
|
||||||
// if the primary key set in the task was different than ours we should stop and make this batch fail asap.
|
// if the primary key set in the task was different than ours we should stop and make this batch fail asap.
|
||||||
K::DocumentImport { allow_index_creation, primary_key: pk } => (
|
K::DocumentImport { method, allow_index_creation, primary_key } => (
|
||||||
Break((
|
Break(BatchKind::DocumentOperation {
|
||||||
BatchKind::DocumentOperation {
|
method,
|
||||||
allow_index_creation,
|
allow_index_creation,
|
||||||
primary_key: pk.clone(),
|
primary_key,
|
||||||
operation_ids: vec![task_id],
|
operation_ids: vec![task_id],
|
||||||
},
|
}),
|
||||||
BatchStopReason::PrimaryKeyIndexMismatch {
|
|
||||||
id: task_id,
|
|
||||||
in_index: primary_key.unwrap().to_owned(),
|
|
||||||
in_task: pk.unwrap(),
|
|
||||||
},
|
|
||||||
)),
|
|
||||||
allow_index_creation,
|
allow_index_creation,
|
||||||
),
|
),
|
||||||
K::DocumentEdition => (
|
K::DocumentEdition => (Break(BatchKind::DocumentEdition { id: task_id }), false),
|
||||||
Break((
|
|
||||||
BatchKind::DocumentEdition { id: task_id },
|
|
||||||
BatchStopReason::TaskCannotBeBatched { kind, id: task_id },
|
|
||||||
)),
|
|
||||||
false,
|
|
||||||
),
|
|
||||||
K::DocumentDeletion { by_filter: includes_by_filter } => (
|
K::DocumentDeletion { by_filter: includes_by_filter } => (
|
||||||
Continue(BatchKind::DocumentDeletion {
|
Continue(BatchKind::DocumentDeletion {
|
||||||
deletion_ids: vec![task_id],
|
deletion_ids: vec![task_id],
|
||||||
@@ -237,71 +215,54 @@ impl BatchKind {
     /// To ease the writing of the code. `true` can be returned when you don't need to create an index
     /// but false can't be returned if you needs to create an index.
     #[rustfmt::skip]
-    fn accumulate(self, id: TaskId, kind_with_content: KindWithContent, index_already_exists: bool, primary_key: Option<&str>) -> ControlFlow<(BatchKind, BatchStopReason), BatchKind> {
+    fn accumulate(self, id: TaskId, kind: AutobatchKind, index_already_exists: bool, primary_key: Option<&str>) -> ControlFlow<BatchKind, BatchKind> {
         use AutobatchKind as K;
 
-        let kind = kind_with_content.as_kind();
-        let autobatch_kind = AutobatchKind::from(kind_with_content);
-
-        let pk: Option<String> = match (self.primary_key(), autobatch_kind.primary_key(), primary_key) {
-            // 1. If incoming task don't interact with primary key -> we can continue
-            (batch_pk, None | Some(None), _) => {
-                batch_pk.flatten().map(ToOwned::to_owned)
-            },
-            // 2.1 If we already have a primary-key ->
-            // 2.1.1 If the task we're trying to accumulate have a pk it must be equal to our primary key
-            (_batch_pk, Some(Some(task_pk)), Some(index_pk)) => if task_pk == index_pk {
-                Some(task_pk.to_owned())
-            } else {
-                return Break((self, BatchStopReason::PrimaryKeyMismatch {
-                    id,
-                    reason: PrimaryKeyMismatchReason::TaskPrimaryKeyDifferFromIndexPrimaryKey {
-                        task_pk: task_pk.to_owned(),
-                        index_pk: index_pk.to_owned(),
-                    },
-                }))
-            },
-            // 2.2 If we don't have a primary-key ->
-            // 2.2.2 If the batch is set to Some(None), the task should be too
-            (Some(None), Some(Some(task_pk)), None) => return Break((self, BatchStopReason::PrimaryKeyMismatch {
-                id,
-                reason: PrimaryKeyMismatchReason::CannotInterfereWithPrimaryKeyGuessing {
-                    task_pk: task_pk.to_owned(),
-                },
-            })),
-            (Some(Some(batch_pk)), Some(Some(task_pk)), None) => if task_pk == batch_pk {
-                Some(task_pk.to_owned())
-            } else {
-                let batch_pk = batch_pk.to_owned();
-                let task_pk = task_pk.to_owned();
-                return Break((self, BatchStopReason::PrimaryKeyMismatch {
-                    id,
-                    reason: PrimaryKeyMismatchReason::TaskPrimaryKeyDifferFromCurrentBatchPrimaryKey {
-                        batch_pk,
-                        task_pk
-                    },
-                }))
-            },
-            (None, Some(Some(task_pk)), None) => Some(task_pk.to_owned())
-        };
-
-        match (self, autobatch_kind) {
+        match (self, kind) {
             // We don't batch any of these operations
-            (this, K::IndexCreation | K::IndexUpdate | K::IndexSwap | K::DocumentEdition) => Break((this, BatchStopReason::TaskCannotBeBatched { kind, id })),
+            (this, K::IndexCreation | K::IndexUpdate | K::IndexSwap | K::DocumentEdition) => Break(this),
             // We must not batch tasks that don't have the same index creation rights if the index doesn't already exists.
             (this, kind) if !index_already_exists && this.allow_index_creation() == Some(false) && kind.allow_index_creation() == Some(true) => {
-                Break((this, BatchStopReason::IndexCreationMismatch { id }))
+                Break(this)
+            },
+            // NOTE: We need to negate the whole condition since we're checking if we need to break instead of continue.
+            // I wrote it this way because it's easier to understand than the other way around.
+            (this, kind) if !(
+                // 1. If both task don't interact with primary key -> we can continue
+                (this.primary_key().is_none() && kind.primary_key().is_none()) ||
+                // 2. Else ->
+                (
+                    // 2.1 If we already have a primary-key ->
+                    (
+                        primary_key.is_some() &&
+                        // 2.1.1 If the task we're trying to accumulate have a pk it must be equal to our primary key
+                        // 2.1.2 If the task don't have a primary-key -> we can continue
+                        kind.primary_key().map_or(true, |pk| pk == primary_key)
+                    ) ||
+                    // 2.2 If we don't have a primary-key ->
+                    (
+                        // 2.2.1 If both the batch and the task have a primary key they should be equal
+                        // 2.2.2 If the batch is set to Some(None), the task should be too
+                        // 2.2.3 If the batch is set to None -> we can continue
+                        this.primary_key().zip(kind.primary_key()).map_or(true, |(this, kind)| this == kind)
+                    )
+                )
+
+            ) // closing the negation
+
+            => {
+                Break(this)
             },
             // The index deletion can batch with everything but must stop after
             (
                 BatchKind::DocumentClear { mut ids }
                 | BatchKind::DocumentDeletion { deletion_ids: mut ids, includes_by_filter: _ }
-                | BatchKind::DocumentOperation { allow_index_creation: _, primary_key: _, operation_ids: mut ids }
+                | BatchKind::DocumentOperation { method: _, allow_index_creation: _, primary_key: _, operation_ids: mut ids }
                 | BatchKind::Settings { allow_index_creation: _, settings_ids: mut ids },
                 K::IndexDeletion,
             ) => {
                 ids.push(id);
-                Break((BatchKind::IndexDeletion { ids }, BatchStopReason::IndexDeletion { id }))
+                Break(BatchKind::IndexDeletion { ids })
             }
             (
                 BatchKind::ClearAndSettings { settings_ids: mut ids, allow_index_creation: _, mut other },
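Aside: the `-` side factors the primary-key rules into an early `let pk = match ...` that reports a precise `BatchStopReason`, while the `+` side checks the same rules as one negated boolean guard. Distilled into a plain predicate (a simplified sketch with stand-in types; the real check also distinguishes a batch key of `Some(None)`):

```rust
/// Can a task with `task_pk` join a batch whose key so far is `batch_pk`,
/// given the index's `index_pk`? `None` means "does not set a primary key".
fn primary_keys_are_compatible(
    batch_pk: Option<&str>,
    task_pk: Option<&str>,
    index_pk: Option<&str>,
) -> bool {
    match (task_pk, index_pk, batch_pk) {
        // 1. the task does not interact with the primary key: always fine
        (None, _, _) => true,
        // 2.1 the index already has a primary key: the task's must match it
        (Some(task), Some(index), _) => task == index,
        // 2.2 no index primary key: the task's must match the batch's, if any
        (Some(task), None, Some(batch)) => task == batch,
        (Some(_), None, None) => true,
    }
}

fn main() {
    assert!(primary_keys_are_compatible(None, Some("id"), Some("id")));
    assert!(!primary_keys_are_compatible(Some("uuid"), Some("id"), None));
}
```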
@@ -309,7 +270,7 @@ impl BatchKind {
             ) => {
                 ids.push(id);
                 ids.append(&mut other);
-                Break((BatchKind::IndexDeletion { ids }, BatchStopReason::IndexDeletion { id }))
+                Break(BatchKind::IndexDeletion { ids })
             }
 
             (
@@ -322,37 +283,51 @@ impl BatchKind {
             (
                 this @ BatchKind::DocumentClear { .. },
                 K::DocumentImport { .. } | K::Settings { .. },
-            ) => Break((this, BatchStopReason::DocumentOperationWithSettings { id })),
+            ) => Break(this),
             (
-                BatchKind::DocumentOperation { allow_index_creation: _, primary_key: _, mut operation_ids },
+                BatchKind::DocumentOperation { method: _, allow_index_creation: _, primary_key: _, mut operation_ids },
                 K::DocumentClear,
             ) => {
                 operation_ids.push(id);
                 Continue(BatchKind::DocumentClear { ids: operation_ids })
             }
 
-            // we can autobatch different kind of document operations and mix replacements with updates
+            // we can autobatch the same kind of document additions / updates
             (
-                BatchKind::DocumentOperation { allow_index_creation, primary_key: _, mut operation_ids },
-                K::DocumentImport { primary_key: _, .. },
+                BatchKind::DocumentOperation { method: ReplaceDocuments, allow_index_creation, primary_key: _, mut operation_ids },
+                K::DocumentImport { method: ReplaceDocuments, primary_key: pk, .. },
             ) => {
                 operation_ids.push(id);
                 Continue(BatchKind::DocumentOperation {
+                    method: ReplaceDocuments,
                    allow_index_creation,
                    operation_ids,
                    primary_key: pk,
                 })
             }
             (
-                BatchKind::DocumentOperation { allow_index_creation, primary_key: _, mut operation_ids },
+                BatchKind::DocumentOperation { method: UpdateDocuments, allow_index_creation, primary_key: _, mut operation_ids },
+                K::DocumentImport { method: UpdateDocuments, primary_key: pk, .. },
+            ) => {
+                operation_ids.push(id);
+                Continue(BatchKind::DocumentOperation {
+                    method: UpdateDocuments,
+                    allow_index_creation,
+                    primary_key: pk,
+                    operation_ids,
+                })
+            }
+            (
+                BatchKind::DocumentOperation { method, allow_index_creation, primary_key, mut operation_ids },
                 K::DocumentDeletion { by_filter: false },
             ) => {
                 operation_ids.push(id);
 
                 Continue(BatchKind::DocumentOperation {
+                    method,
                    allow_index_creation,
+                    primary_key,
                    operation_ids,
-                    primary_key: pk,
                 })
             }
             // We can't batch a document operation with a delete by filter
@@ -360,12 +335,19 @@ impl BatchKind {
                 this @ BatchKind::DocumentOperation { .. },
                 K::DocumentDeletion { by_filter: true },
             ) => {
-                Break((this, BatchStopReason::DocumentOperationWithDeletionByFilter { id }))
+                Break(this)
             }
+            // but we can't autobatch documents if it's not the same kind
+            // this match branch MUST be AFTER the previous one
+            (
+                this @ BatchKind::DocumentOperation { .. },
+                K::DocumentImport { .. },
+            ) => Break(this),
+
             (
                 this @ BatchKind::DocumentOperation { .. },
                 K::Settings { .. },
-            ) => Break((this, BatchStopReason::DocumentOperationWithSettings { id })),
+            ) => Break(this),
 
             (BatchKind::DocumentDeletion { mut deletion_ids, includes_by_filter: _ }, K::DocumentClear) => {
                 deletion_ids.push(id);
@@ -375,15 +357,16 @@ impl BatchKind {
             (
                 this @ BatchKind::DocumentDeletion { deletion_ids: _, includes_by_filter: true },
                 K::DocumentImport { .. }
-            ) => Break((this, BatchStopReason::DeletionByFilterWithDocumentOperation { id })),
+            ) => Break(this),
             // we can autobatch the deletion and import if the index already exists
             (
                 BatchKind::DocumentDeletion { mut deletion_ids, includes_by_filter: false },
-                K::DocumentImport { allow_index_creation, primary_key }
+                K::DocumentImport { method, allow_index_creation, primary_key }
             ) if index_already_exists => {
                 deletion_ids.push(id);
 
                 Continue(BatchKind::DocumentOperation {
+                    method,
                    allow_index_creation,
                    primary_key,
                    operation_ids: deletion_ids,
@@ -392,28 +375,29 @@ impl BatchKind {
             // we can autobatch the deletion and import if both can't create an index
             (
                 BatchKind::DocumentDeletion { mut deletion_ids, includes_by_filter: false },
-                K::DocumentImport { allow_index_creation, primary_key }
+                K::DocumentImport { method, allow_index_creation, primary_key }
             ) if !allow_index_creation => {
                 deletion_ids.push(id);
 
                 Continue(BatchKind::DocumentOperation {
+                    method,
                    allow_index_creation,
                    primary_key,
                    operation_ids: deletion_ids,
                 })
             }
-            // we can't autobatch a deletion and an import if the index does not exist but would be created by an addition
+            // we can't autobatch a deletion and an import if the index does not exists but would be created by an addition
             (
                 this @ BatchKind::DocumentDeletion { .. },
                 K::DocumentImport { .. }
             ) => {
-                Break((this, BatchStopReason::IndexCreationMismatch { id }))
+                Break(this)
             }
             (BatchKind::DocumentDeletion { mut deletion_ids, includes_by_filter }, K::DocumentDeletion { by_filter }) => {
                 deletion_ids.push(id);
                 Continue(BatchKind::DocumentDeletion { deletion_ids, includes_by_filter: includes_by_filter | by_filter })
             }
-            (this @ BatchKind::DocumentDeletion { .. }, K::Settings { .. }) => Break((this, BatchStopReason::DocumentOperationWithSettings { id })),
+            (this @ BatchKind::DocumentDeletion { .. }, K::Settings { .. }) => Break(this),
 
             (
                 BatchKind::Settings { settings_ids, allow_index_creation },
@@ -426,7 +410,7 @@ impl BatchKind {
             (
                 this @ BatchKind::Settings { .. },
                 K::DocumentImport { .. } | K::DocumentDeletion { .. },
-            ) => Break((this, BatchStopReason::SettingsWithDocumentOperation { id })),
+            ) => Break(this),
             (
                 BatchKind::Settings { mut settings_ids, allow_index_creation },
                 K::Settings { .. },
@@ -449,7 +433,7 @@ impl BatchKind {
                     allow_index_creation,
                 })
             }
-            (this @ BatchKind::ClearAndSettings { .. }, K::DocumentImport { .. }) => Break((this, BatchStopReason::SettingsWithDocumentOperation { id })),
+            (this @ BatchKind::ClearAndSettings { .. }, K::DocumentImport { .. }) => Break(this),
             (
                 BatchKind::ClearAndSettings {
                     mut other,
@@ -505,7 +489,7 @@ pub fn autobatch(
     enqueued: Vec<(TaskId, KindWithContent)>,
     index_already_exists: bool,
     primary_key: Option<&str>,
-) -> Option<(BatchKind, bool, Option<BatchStopReason>)> {
+) -> Option<(BatchKind, bool)> {
     let mut enqueued = enqueued.into_iter();
     let (id, kind) = enqueued.next()?;
 
@ -514,22 +498,18 @@ pub fn autobatch(
|
|||||||
|
|
||||||
let (mut acc, must_create_index) = match BatchKind::new(id, kind, primary_key) {
|
let (mut acc, must_create_index) = match BatchKind::new(id, kind, primary_key) {
|
||||||
(Continue(acc), create) => (acc, create),
|
(Continue(acc), create) => (acc, create),
|
||||||
(Break((acc, batch_stop_reason)), create) => {
|
(Break(acc), create) => return Some((acc, create)),
|
||||||
return Some((acc, create, Some(batch_stop_reason)))
|
|
||||||
}
|
|
||||||
};
|
};
|
||||||
|
|
||||||
// if an index has been created in the previous step we can consider it as existing.
|
// if an index has been created in the previous step we can consider it as existing.
|
||||||
index_exist |= must_create_index;
|
index_exist |= must_create_index;
|
||||||
|
|
||||||
for (id, kind_with_content) in enqueued {
|
for (id, kind) in enqueued {
|
||||||
acc = match acc.accumulate(id, kind_with_content, index_exist, primary_key) {
|
acc = match acc.accumulate(id, kind.into(), index_exist, primary_key) {
|
||||||
Continue(acc) => acc,
|
Continue(acc) => acc,
|
||||||
Break((acc, batch_stop_reason)) => {
|
Break(acc) => return Some((acc, must_create_index)),
|
||||||
return Some((acc, must_create_index, Some(batch_stop_reason)))
|
|
||||||
}
|
|
||||||
};
|
};
|
||||||
}
|
}
|
||||||
|
|
||||||
Some((acc, must_create_index, None))
|
Some((acc, must_create_index))
|
||||||
}
|
}
|
||||||
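Note (editor's sketch, not part of the diff): with this change, autobatch no longer reports why a batch was closed, so any caller that destructured the third Option<BatchStopReason> element of the returned tuple must switch to the two-element form. A minimal, hypothetical caller under these assumptions:

    // Hypothetical caller adapting to the new signature; `enqueued`,
    // `index_already_exists`, and `primary_key` stand in for real scheduler
    // state, and `process_batch` is purely illustrative.
    if let Some((batch_kind, must_create_index)) =
        autobatch(enqueued, index_already_exists, primary_key)
    {
        // The batch itself is built the same way; only the stop reason is
        // gone, so nothing is recorded about why batching stopped.
        process_batch(batch_kind, must_create_index);
    }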
|
@@ -1,7 +1,7 @@
 use meilisearch_types::milli::update::IndexDocumentsMethod::{
     self, ReplaceDocuments, UpdateDocuments,
 };
-use meilisearch_types::tasks::{BatchStopReason, IndexSwap, KindWithContent};
+use meilisearch_types::tasks::{IndexSwap, KindWithContent};
 use uuid::Uuid;

 use self::autobatcher::{autobatch, BatchKind};
@@ -20,7 +20,7 @@ fn autobatch_from(
     index_already_exists: bool,
     primary_key: Option<&str>,
     input: impl IntoIterator<Item = KindWithContent>,
-) -> Option<(BatchKind, bool, Option<BatchStopReason>)> {
+) -> Option<(BatchKind, bool)> {
     autobatch(
         input.into_iter().enumerate().map(|(id, kind)| (id as TaskId, kind)).collect(),
         index_already_exists,
@@ -92,304 +92,304 @@ fn idx_swap() -> KindWithContent {
 fn autobatch_simple_operation_together() {
     // we can autobatch one or multiple `ReplaceDocuments` together.
     // if the index exists.
-    debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, true, None)]), @"Some((DocumentOperation { allow_index_creation: true, primary_key: None, operation_ids: [0] }, true, None))");
+    debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, true, None)]), @"Some((DocumentOperation { method: ReplaceDocuments, allow_index_creation: true, primary_key: None, operation_ids: [0] }, true))");
-    debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, false, None)]), @"Some((DocumentOperation { allow_index_creation: false, primary_key: None, operation_ids: [0] }, false, None))");
+    debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, false, None)]), @"Some((DocumentOperation { method: ReplaceDocuments, allow_index_creation: false, primary_key: None, operation_ids: [0] }, false))");
-    debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, true, None), doc_imp( ReplaceDocuments, true , None), doc_imp(ReplaceDocuments, true , None)]), @"Some((DocumentOperation { allow_index_creation: true, primary_key: None, operation_ids: [0, 1, 2] }, true, None))");
+    debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, true, None), doc_imp( ReplaceDocuments, true , None), doc_imp(ReplaceDocuments, true , None)]), @"Some((DocumentOperation { method: ReplaceDocuments, allow_index_creation: true, primary_key: None, operation_ids: [0, 1, 2] }, true))");
-    debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, false, None), doc_imp( ReplaceDocuments, false , None), doc_imp(ReplaceDocuments, false , None)]), @"Some((DocumentOperation { allow_index_creation: false, primary_key: None, operation_ids: [0, 1, 2] }, false, None))");
+    debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, false, None), doc_imp( ReplaceDocuments, false , None), doc_imp(ReplaceDocuments, false , None)]), @"Some((DocumentOperation { method: ReplaceDocuments, allow_index_creation: false, primary_key: None, operation_ids: [0, 1, 2] }, false))");

     // if it doesn't exists.
-    debug_snapshot!(autobatch_from(false,None, [doc_imp(ReplaceDocuments, true, None)]), @"Some((DocumentOperation { allow_index_creation: true, primary_key: None, operation_ids: [0] }, true, None))");
+    debug_snapshot!(autobatch_from(false,None, [doc_imp(ReplaceDocuments, true, None)]), @"Some((DocumentOperation { method: ReplaceDocuments, allow_index_creation: true, primary_key: None, operation_ids: [0] }, true))");
-    debug_snapshot!(autobatch_from(false,None, [doc_imp(ReplaceDocuments, false, None)]), @"Some((DocumentOperation { allow_index_creation: false, primary_key: None, operation_ids: [0] }, false, None))");
+    debug_snapshot!(autobatch_from(false,None, [doc_imp(ReplaceDocuments, false, None)]), @"Some((DocumentOperation { method: ReplaceDocuments, allow_index_creation: false, primary_key: None, operation_ids: [0] }, false))");
-    debug_snapshot!(autobatch_from(false,None, [doc_imp(ReplaceDocuments, true, None), doc_imp( ReplaceDocuments, true , None), doc_imp(ReplaceDocuments, true , None)]), @"Some((DocumentOperation { allow_index_creation: true, primary_key: None, operation_ids: [0, 1, 2] }, true, None))");
+    debug_snapshot!(autobatch_from(false,None, [doc_imp(ReplaceDocuments, true, None), doc_imp( ReplaceDocuments, true , None), doc_imp(ReplaceDocuments, true , None)]), @"Some((DocumentOperation { method: ReplaceDocuments, allow_index_creation: true, primary_key: None, operation_ids: [0, 1, 2] }, true))");
-    debug_snapshot!(autobatch_from(false,None, [doc_imp(ReplaceDocuments, false, None), doc_imp( ReplaceDocuments, true , None), doc_imp(ReplaceDocuments, true , None)]), @"Some((DocumentOperation { allow_index_creation: false, primary_key: None, operation_ids: [0] }, false, Some(IndexCreationMismatch { id: 1 })))");
+    debug_snapshot!(autobatch_from(false,None, [doc_imp(ReplaceDocuments, false, None), doc_imp( ReplaceDocuments, true , None), doc_imp(ReplaceDocuments, true , None)]), @"Some((DocumentOperation { method: ReplaceDocuments, allow_index_creation: false, primary_key: None, operation_ids: [0] }, false))");

     // we can autobatch one or multiple `UpdateDocuments` together.
     // if the index exists.
-    debug_snapshot!(autobatch_from(true, None, [doc_imp(UpdateDocuments, true, None)]), @"Some((DocumentOperation { allow_index_creation: true, primary_key: None, operation_ids: [0] }, true, None))");
+    debug_snapshot!(autobatch_from(true, None, [doc_imp(UpdateDocuments, true, None)]), @"Some((DocumentOperation { method: UpdateDocuments, allow_index_creation: true, primary_key: None, operation_ids: [0] }, true))");
-    debug_snapshot!(autobatch_from(true, None, [doc_imp(UpdateDocuments, true, None), doc_imp(UpdateDocuments, true, None), doc_imp(UpdateDocuments, true, None)]), @"Some((DocumentOperation { allow_index_creation: true, primary_key: None, operation_ids: [0, 1, 2] }, true, None))");
+    debug_snapshot!(autobatch_from(true, None, [doc_imp(UpdateDocuments, true, None), doc_imp(UpdateDocuments, true, None), doc_imp(UpdateDocuments, true, None)]), @"Some((DocumentOperation { method: UpdateDocuments, allow_index_creation: true, primary_key: None, operation_ids: [0, 1, 2] }, true))");
-    debug_snapshot!(autobatch_from(true, None, [doc_imp(UpdateDocuments, false, None)]), @"Some((DocumentOperation { allow_index_creation: false, primary_key: None, operation_ids: [0] }, false, None))");
+    debug_snapshot!(autobatch_from(true, None, [doc_imp(UpdateDocuments, false, None)]), @"Some((DocumentOperation { method: UpdateDocuments, allow_index_creation: false, primary_key: None, operation_ids: [0] }, false))");
-    debug_snapshot!(autobatch_from(true, None, [doc_imp(UpdateDocuments, false, None), doc_imp(UpdateDocuments, false, None), doc_imp(UpdateDocuments, false, None)]), @"Some((DocumentOperation { allow_index_creation: false, primary_key: None, operation_ids: [0, 1, 2] }, false, None))");
+    debug_snapshot!(autobatch_from(true, None, [doc_imp(UpdateDocuments, false, None), doc_imp(UpdateDocuments, false, None), doc_imp(UpdateDocuments, false, None)]), @"Some((DocumentOperation { method: UpdateDocuments, allow_index_creation: false, primary_key: None, operation_ids: [0, 1, 2] }, false))");

     // if it doesn't exists.
-    debug_snapshot!(autobatch_from(false,None, [doc_imp(UpdateDocuments, true, None)]), @"Some((DocumentOperation { allow_index_creation: true, primary_key: None, operation_ids: [0] }, true, None))");
+    debug_snapshot!(autobatch_from(false,None, [doc_imp(UpdateDocuments, true, None)]), @"Some((DocumentOperation { method: UpdateDocuments, allow_index_creation: true, primary_key: None, operation_ids: [0] }, true))");
-    debug_snapshot!(autobatch_from(false,None, [doc_imp(UpdateDocuments, true, None), doc_imp(UpdateDocuments, true, None), doc_imp(UpdateDocuments, true, None)]), @"Some((DocumentOperation { allow_index_creation: true, primary_key: None, operation_ids: [0, 1, 2] }, true, None))");
+    debug_snapshot!(autobatch_from(false,None, [doc_imp(UpdateDocuments, true, None), doc_imp(UpdateDocuments, true, None), doc_imp(UpdateDocuments, true, None)]), @"Some((DocumentOperation { method: UpdateDocuments, allow_index_creation: true, primary_key: None, operation_ids: [0, 1, 2] }, true))");
-    debug_snapshot!(autobatch_from(false,None, [doc_imp(UpdateDocuments, false, None)]), @"Some((DocumentOperation { allow_index_creation: false, primary_key: None, operation_ids: [0] }, false, None))");
+    debug_snapshot!(autobatch_from(false,None, [doc_imp(UpdateDocuments, false, None)]), @"Some((DocumentOperation { method: UpdateDocuments, allow_index_creation: false, primary_key: None, operation_ids: [0] }, false))");
-    debug_snapshot!(autobatch_from(false,None, [doc_imp(UpdateDocuments, false, None), doc_imp(UpdateDocuments, false, None), doc_imp(UpdateDocuments, false, None)]), @"Some((DocumentOperation { allow_index_creation: false, primary_key: None, operation_ids: [0, 1, 2] }, false, None))");
+    debug_snapshot!(autobatch_from(false,None, [doc_imp(UpdateDocuments, false, None), doc_imp(UpdateDocuments, false, None), doc_imp(UpdateDocuments, false, None)]), @"Some((DocumentOperation { method: UpdateDocuments, allow_index_creation: false, primary_key: None, operation_ids: [0, 1, 2] }, false))");

     // we can autobatch one or multiple DocumentDeletion together
-    debug_snapshot!(autobatch_from(true, None, [doc_del()]), @"Some((DocumentDeletion { deletion_ids: [0], includes_by_filter: false }, false, None))");
+    debug_snapshot!(autobatch_from(true, None, [doc_del()]), @"Some((DocumentDeletion { deletion_ids: [0], includes_by_filter: false }, false))");
-    debug_snapshot!(autobatch_from(true, None, [doc_del(), doc_del(), doc_del()]), @"Some((DocumentDeletion { deletion_ids: [0, 1, 2], includes_by_filter: false }, false, None))");
+    debug_snapshot!(autobatch_from(true, None, [doc_del(), doc_del(), doc_del()]), @"Some((DocumentDeletion { deletion_ids: [0, 1, 2], includes_by_filter: false }, false))");
-    debug_snapshot!(autobatch_from(false,None, [doc_del()]), @"Some((DocumentDeletion { deletion_ids: [0], includes_by_filter: false }, false, None))");
+    debug_snapshot!(autobatch_from(false,None, [doc_del()]), @"Some((DocumentDeletion { deletion_ids: [0], includes_by_filter: false }, false))");
-    debug_snapshot!(autobatch_from(false,None, [doc_del(), doc_del(), doc_del()]), @"Some((DocumentDeletion { deletion_ids: [0, 1, 2], includes_by_filter: false }, false, None))");
+    debug_snapshot!(autobatch_from(false,None, [doc_del(), doc_del(), doc_del()]), @"Some((DocumentDeletion { deletion_ids: [0, 1, 2], includes_by_filter: false }, false))");

     // we can autobatch one or multiple DocumentDeletionByFilter together
-    debug_snapshot!(autobatch_from(true, None, [doc_del_fil()]), @"Some((DocumentDeletion { deletion_ids: [0], includes_by_filter: true }, false, None))");
+    debug_snapshot!(autobatch_from(true, None, [doc_del_fil()]), @"Some((DocumentDeletion { deletion_ids: [0], includes_by_filter: true }, false))");
-    debug_snapshot!(autobatch_from(true, None, [doc_del_fil(), doc_del_fil(), doc_del_fil()]), @"Some((DocumentDeletion { deletion_ids: [0, 1, 2], includes_by_filter: true }, false, None))");
+    debug_snapshot!(autobatch_from(true, None, [doc_del_fil(), doc_del_fil(), doc_del_fil()]), @"Some((DocumentDeletion { deletion_ids: [0, 1, 2], includes_by_filter: true }, false))");
-    debug_snapshot!(autobatch_from(false,None, [doc_del_fil()]), @"Some((DocumentDeletion { deletion_ids: [0], includes_by_filter: true }, false, None))");
+    debug_snapshot!(autobatch_from(false,None, [doc_del_fil()]), @"Some((DocumentDeletion { deletion_ids: [0], includes_by_filter: true }, false))");
-    debug_snapshot!(autobatch_from(false,None, [doc_del_fil(), doc_del_fil(), doc_del_fil()]), @"Some((DocumentDeletion { deletion_ids: [0, 1, 2], includes_by_filter: true }, false, None))");
+    debug_snapshot!(autobatch_from(false,None, [doc_del_fil(), doc_del_fil(), doc_del_fil()]), @"Some((DocumentDeletion { deletion_ids: [0, 1, 2], includes_by_filter: true }, false))");

     // we can autobatch one or multiple Settings together
-    debug_snapshot!(autobatch_from(true, None, [settings(true)]), @"Some((Settings { allow_index_creation: true, settings_ids: [0] }, true, None))");
+    debug_snapshot!(autobatch_from(true, None, [settings(true)]), @"Some((Settings { allow_index_creation: true, settings_ids: [0] }, true))");
-    debug_snapshot!(autobatch_from(true, None, [settings(true), settings(true), settings(true)]), @"Some((Settings { allow_index_creation: true, settings_ids: [0, 1, 2] }, true, None))");
+    debug_snapshot!(autobatch_from(true, None, [settings(true), settings(true), settings(true)]), @"Some((Settings { allow_index_creation: true, settings_ids: [0, 1, 2] }, true))");
-    debug_snapshot!(autobatch_from(true, None, [settings(false)]), @"Some((Settings { allow_index_creation: false, settings_ids: [0] }, false, None))");
+    debug_snapshot!(autobatch_from(true, None, [settings(false)]), @"Some((Settings { allow_index_creation: false, settings_ids: [0] }, false))");
-    debug_snapshot!(autobatch_from(true, None, [settings(false), settings(false), settings(false)]), @"Some((Settings { allow_index_creation: false, settings_ids: [0, 1, 2] }, false, None))");
+    debug_snapshot!(autobatch_from(true, None, [settings(false), settings(false), settings(false)]), @"Some((Settings { allow_index_creation: false, settings_ids: [0, 1, 2] }, false))");

-    debug_snapshot!(autobatch_from(false,None, [settings(true)]), @"Some((Settings { allow_index_creation: true, settings_ids: [0] }, true, None))");
+    debug_snapshot!(autobatch_from(false,None, [settings(true)]), @"Some((Settings { allow_index_creation: true, settings_ids: [0] }, true))");
-    debug_snapshot!(autobatch_from(false,None, [settings(true), settings(true), settings(true)]), @"Some((Settings { allow_index_creation: true, settings_ids: [0, 1, 2] }, true, None))");
+    debug_snapshot!(autobatch_from(false,None, [settings(true), settings(true), settings(true)]), @"Some((Settings { allow_index_creation: true, settings_ids: [0, 1, 2] }, true))");
-    debug_snapshot!(autobatch_from(false,None, [settings(false)]), @"Some((Settings { allow_index_creation: false, settings_ids: [0] }, false, None))");
+    debug_snapshot!(autobatch_from(false,None, [settings(false)]), @"Some((Settings { allow_index_creation: false, settings_ids: [0] }, false))");
-    debug_snapshot!(autobatch_from(false,None, [settings(false), settings(false), settings(false)]), @"Some((Settings { allow_index_creation: false, settings_ids: [0, 1, 2] }, false, None))");
+    debug_snapshot!(autobatch_from(false,None, [settings(false), settings(false), settings(false)]), @"Some((Settings { allow_index_creation: false, settings_ids: [0, 1, 2] }, false))");

     // We can autobatch document addition with document deletion
-    debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, true, None), doc_del()]), @"Some((DocumentOperation { allow_index_creation: true, primary_key: None, operation_ids: [0, 1] }, true, None))");
+    debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, true, None), doc_del()]), @"Some((DocumentOperation { method: ReplaceDocuments, allow_index_creation: true, primary_key: None, operation_ids: [0, 1] }, true))");
-    debug_snapshot!(autobatch_from(true, None, [doc_imp(UpdateDocuments, true, None), doc_del()]), @"Some((DocumentOperation { allow_index_creation: true, primary_key: None, operation_ids: [0, 1] }, true, None))");
+    debug_snapshot!(autobatch_from(true, None, [doc_imp(UpdateDocuments, true, None), doc_del()]), @"Some((DocumentOperation { method: UpdateDocuments, allow_index_creation: true, primary_key: None, operation_ids: [0, 1] }, true))");
-    debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, false, None), doc_del()]), @"Some((DocumentOperation { allow_index_creation: false, primary_key: None, operation_ids: [0, 1] }, false, None))");
+    debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, false, None), doc_del()]), @"Some((DocumentOperation { method: ReplaceDocuments, allow_index_creation: false, primary_key: None, operation_ids: [0, 1] }, false))");
-    debug_snapshot!(autobatch_from(true, None, [doc_imp(UpdateDocuments, false, None), doc_del()]), @"Some((DocumentOperation { allow_index_creation: false, primary_key: None, operation_ids: [0, 1] }, false, None))");
+    debug_snapshot!(autobatch_from(true, None, [doc_imp(UpdateDocuments, false, None), doc_del()]), @"Some((DocumentOperation { method: UpdateDocuments, allow_index_creation: false, primary_key: None, operation_ids: [0, 1] }, false))");
-    debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, true, Some("catto")), doc_del()]), @r###"Some((DocumentOperation { allow_index_creation: true, primary_key: Some("catto"), operation_ids: [0, 1] }, true, None))"###);
+    debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, true, Some("catto")), doc_del()]), @r###"Some((DocumentOperation { method: ReplaceDocuments, allow_index_creation: true, primary_key: Some("catto"), operation_ids: [0, 1] }, true))"###);
-    debug_snapshot!(autobatch_from(true, None, [doc_imp(UpdateDocuments, true, Some("catto")), doc_del()]), @r###"Some((DocumentOperation { allow_index_creation: true, primary_key: Some("catto"), operation_ids: [0, 1] }, true, None))"###);
+    debug_snapshot!(autobatch_from(true, None, [doc_imp(UpdateDocuments, true, Some("catto")), doc_del()]), @r###"Some((DocumentOperation { method: UpdateDocuments, allow_index_creation: true, primary_key: Some("catto"), operation_ids: [0, 1] }, true))"###);
-    debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, false, Some("catto")), doc_del()]), @r###"Some((DocumentOperation { allow_index_creation: false, primary_key: Some("catto"), operation_ids: [0, 1] }, false, None))"###);
+    debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, false, Some("catto")), doc_del()]), @r###"Some((DocumentOperation { method: ReplaceDocuments, allow_index_creation: false, primary_key: Some("catto"), operation_ids: [0, 1] }, false))"###);
-    debug_snapshot!(autobatch_from(true, None, [doc_imp(UpdateDocuments, false, Some("catto")), doc_del()]), @r###"Some((DocumentOperation { allow_index_creation: false, primary_key: Some("catto"), operation_ids: [0, 1] }, false, None))"###);
+    debug_snapshot!(autobatch_from(true, None, [doc_imp(UpdateDocuments, false, Some("catto")), doc_del()]), @r###"Some((DocumentOperation { method: UpdateDocuments, allow_index_creation: false, primary_key: Some("catto"), operation_ids: [0, 1] }, false))"###);
-    debug_snapshot!(autobatch_from(false, None, [doc_imp(ReplaceDocuments, true, None), doc_del()]), @"Some((DocumentOperation { allow_index_creation: true, primary_key: None, operation_ids: [0, 1] }, true, None))");
+    debug_snapshot!(autobatch_from(false, None, [doc_imp(ReplaceDocuments, true, None), doc_del()]), @"Some((DocumentOperation { method: ReplaceDocuments, allow_index_creation: true, primary_key: None, operation_ids: [0, 1] }, true))");
-    debug_snapshot!(autobatch_from(false, None, [doc_imp(UpdateDocuments, true, None), doc_del()]), @"Some((DocumentOperation { allow_index_creation: true, primary_key: None, operation_ids: [0, 1] }, true, None))");
+    debug_snapshot!(autobatch_from(false, None, [doc_imp(UpdateDocuments, true, None), doc_del()]), @"Some((DocumentOperation { method: UpdateDocuments, allow_index_creation: true, primary_key: None, operation_ids: [0, 1] }, true))");
-    debug_snapshot!(autobatch_from(false, None, [doc_imp(ReplaceDocuments, false, None), doc_del()]), @"Some((DocumentOperation { allow_index_creation: false, primary_key: None, operation_ids: [0, 1] }, false, None))");
+    debug_snapshot!(autobatch_from(false, None, [doc_imp(ReplaceDocuments, false, None), doc_del()]), @"Some((DocumentOperation { method: ReplaceDocuments, allow_index_creation: false, primary_key: None, operation_ids: [0, 1] }, false))");
-    debug_snapshot!(autobatch_from(false, None, [doc_imp(UpdateDocuments, false, None), doc_del()]), @"Some((DocumentOperation { allow_index_creation: false, primary_key: None, operation_ids: [0, 1] }, false, None))");
+    debug_snapshot!(autobatch_from(false, None, [doc_imp(UpdateDocuments, false, None), doc_del()]), @"Some((DocumentOperation { method: UpdateDocuments, allow_index_creation: false, primary_key: None, operation_ids: [0, 1] }, false))");
-    debug_snapshot!(autobatch_from(false, None, [doc_imp(ReplaceDocuments, true, Some("catto")), doc_del()]), @r###"Some((DocumentOperation { allow_index_creation: true, primary_key: Some("catto"), operation_ids: [0, 1] }, true, None))"###);
+    debug_snapshot!(autobatch_from(false, None, [doc_imp(ReplaceDocuments, true, Some("catto")), doc_del()]), @r###"Some((DocumentOperation { method: ReplaceDocuments, allow_index_creation: true, primary_key: Some("catto"), operation_ids: [0, 1] }, true))"###);
-    debug_snapshot!(autobatch_from(false, None, [doc_imp(UpdateDocuments, true, Some("catto")), doc_del()]), @r###"Some((DocumentOperation { allow_index_creation: true, primary_key: Some("catto"), operation_ids: [0, 1] }, true, None))"###);
+    debug_snapshot!(autobatch_from(false, None, [doc_imp(UpdateDocuments, true, Some("catto")), doc_del()]), @r###"Some((DocumentOperation { method: UpdateDocuments, allow_index_creation: true, primary_key: Some("catto"), operation_ids: [0, 1] }, true))"###);
-    debug_snapshot!(autobatch_from(false, None, [doc_imp(ReplaceDocuments, false, Some("catto")), doc_del()]), @r###"Some((DocumentOperation { allow_index_creation: false, primary_key: Some("catto"), operation_ids: [0, 1] }, false, None))"###);
+    debug_snapshot!(autobatch_from(false, None, [doc_imp(ReplaceDocuments, false, Some("catto")), doc_del()]), @r###"Some((DocumentOperation { method: ReplaceDocuments, allow_index_creation: false, primary_key: Some("catto"), operation_ids: [0, 1] }, false))"###);
-    debug_snapshot!(autobatch_from(false, None, [doc_imp(UpdateDocuments, false, Some("catto")), doc_del()]), @r###"Some((DocumentOperation { allow_index_creation: false, primary_key: Some("catto"), operation_ids: [0, 1] }, false, None))"###);
+    debug_snapshot!(autobatch_from(false, None, [doc_imp(UpdateDocuments, false, Some("catto")), doc_del()]), @r###"Some((DocumentOperation { method: UpdateDocuments, allow_index_creation: false, primary_key: Some("catto"), operation_ids: [0, 1] }, false))"###);
     // And the other way around
-    debug_snapshot!(autobatch_from(true, None, [doc_del(), doc_imp(ReplaceDocuments, true, None)]), @"Some((DocumentOperation { allow_index_creation: true, primary_key: None, operation_ids: [0, 1] }, false, None))");
+    debug_snapshot!(autobatch_from(true, None, [doc_del(), doc_imp(ReplaceDocuments, true, None)]), @"Some((DocumentOperation { method: ReplaceDocuments, allow_index_creation: true, primary_key: None, operation_ids: [0, 1] }, false))");
-    debug_snapshot!(autobatch_from(true, None, [doc_del(), doc_imp(UpdateDocuments, true, None)]), @"Some((DocumentOperation { allow_index_creation: true, primary_key: None, operation_ids: [0, 1] }, false, None))");
+    debug_snapshot!(autobatch_from(true, None, [doc_del(), doc_imp(UpdateDocuments, true, None)]), @"Some((DocumentOperation { method: UpdateDocuments, allow_index_creation: true, primary_key: None, operation_ids: [0, 1] }, false))");
-    debug_snapshot!(autobatch_from(true, None, [doc_del(), doc_imp(ReplaceDocuments, false, None)]), @"Some((DocumentOperation { allow_index_creation: false, primary_key: None, operation_ids: [0, 1] }, false, None))");
+    debug_snapshot!(autobatch_from(true, None, [doc_del(), doc_imp(ReplaceDocuments, false, None)]), @"Some((DocumentOperation { method: ReplaceDocuments, allow_index_creation: false, primary_key: None, operation_ids: [0, 1] }, false))");
-    debug_snapshot!(autobatch_from(true, None, [doc_del(), doc_imp(UpdateDocuments, false, None)]), @"Some((DocumentOperation { allow_index_creation: false, primary_key: None, operation_ids: [0, 1] }, false, None))");
+    debug_snapshot!(autobatch_from(true, None, [doc_del(), doc_imp(UpdateDocuments, false, None)]), @"Some((DocumentOperation { method: UpdateDocuments, allow_index_creation: false, primary_key: None, operation_ids: [0, 1] }, false))");
-    debug_snapshot!(autobatch_from(true, None, [doc_del(), doc_imp(ReplaceDocuments, true, Some("catto"))]), @r###"Some((DocumentOperation { allow_index_creation: true, primary_key: Some("catto"), operation_ids: [0, 1] }, false, None))"###);
+    debug_snapshot!(autobatch_from(true, None, [doc_del(), doc_imp(ReplaceDocuments, true, Some("catto"))]), @r###"Some((DocumentOperation { method: ReplaceDocuments, allow_index_creation: true, primary_key: Some("catto"), operation_ids: [0, 1] }, false))"###);
-    debug_snapshot!(autobatch_from(true, None, [doc_del(), doc_imp(UpdateDocuments, true, Some("catto"))]), @r###"Some((DocumentOperation { allow_index_creation: true, primary_key: Some("catto"), operation_ids: [0, 1] }, false, None))"###);
+    debug_snapshot!(autobatch_from(true, None, [doc_del(), doc_imp(UpdateDocuments, true, Some("catto"))]), @r###"Some((DocumentOperation { method: UpdateDocuments, allow_index_creation: true, primary_key: Some("catto"), operation_ids: [0, 1] }, false))"###);
-    debug_snapshot!(autobatch_from(true, None, [doc_del(), doc_imp(ReplaceDocuments, false, Some("catto"))]), @r###"Some((DocumentOperation { allow_index_creation: false, primary_key: Some("catto"), operation_ids: [0, 1] }, false, None))"###);
+    debug_snapshot!(autobatch_from(true, None, [doc_del(), doc_imp(ReplaceDocuments, false, Some("catto"))]), @r###"Some((DocumentOperation { method: ReplaceDocuments, allow_index_creation: false, primary_key: Some("catto"), operation_ids: [0, 1] }, false))"###);
-    debug_snapshot!(autobatch_from(true, None, [doc_del(), doc_imp(UpdateDocuments, false, Some("catto"))]), @r###"Some((DocumentOperation { allow_index_creation: false, primary_key: Some("catto"), operation_ids: [0, 1] }, false, None))"###);
+    debug_snapshot!(autobatch_from(true, None, [doc_del(), doc_imp(UpdateDocuments, false, Some("catto"))]), @r###"Some((DocumentOperation { method: UpdateDocuments, allow_index_creation: false, primary_key: Some("catto"), operation_ids: [0, 1] }, false))"###);
-    debug_snapshot!(autobatch_from(false, None, [doc_del(), doc_imp(ReplaceDocuments, false, None)]), @"Some((DocumentOperation { allow_index_creation: false, primary_key: None, operation_ids: [0, 1] }, false, None))");
+    debug_snapshot!(autobatch_from(false, None, [doc_del(), doc_imp(ReplaceDocuments, false, None)]), @"Some((DocumentOperation { method: ReplaceDocuments, allow_index_creation: false, primary_key: None, operation_ids: [0, 1] }, false))");
-    debug_snapshot!(autobatch_from(false, None, [doc_del(), doc_imp(UpdateDocuments, false, None)]), @"Some((DocumentOperation { allow_index_creation: false, primary_key: None, operation_ids: [0, 1] }, false, None))");
+    debug_snapshot!(autobatch_from(false, None, [doc_del(), doc_imp(UpdateDocuments, false, None)]), @"Some((DocumentOperation { method: UpdateDocuments, allow_index_creation: false, primary_key: None, operation_ids: [0, 1] }, false))");
-    debug_snapshot!(autobatch_from(false, None, [doc_del(), doc_imp(ReplaceDocuments, false, Some("catto"))]), @r###"Some((DocumentOperation { allow_index_creation: false, primary_key: Some("catto"), operation_ids: [0, 1] }, false, None))"###);
+    debug_snapshot!(autobatch_from(false, None, [doc_del(), doc_imp(ReplaceDocuments, false, Some("catto"))]), @r###"Some((DocumentOperation { method: ReplaceDocuments, allow_index_creation: false, primary_key: Some("catto"), operation_ids: [0, 1] }, false))"###);
-    debug_snapshot!(autobatch_from(false, None, [doc_del(), doc_imp(UpdateDocuments, false, Some("catto"))]), @r###"Some((DocumentOperation { allow_index_creation: false, primary_key: Some("catto"), operation_ids: [0, 1] }, false, None))"###);
+    debug_snapshot!(autobatch_from(false, None, [doc_del(), doc_imp(UpdateDocuments, false, Some("catto"))]), @r###"Some((DocumentOperation { method: UpdateDocuments, allow_index_creation: false, primary_key: Some("catto"), operation_ids: [0, 1] }, false))"###);

     // But we can't autobatch document addition with document deletion by filter
-    debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, true, None), doc_del_fil()]), @"Some((DocumentOperation { allow_index_creation: true, primary_key: None, operation_ids: [0] }, true, Some(DocumentOperationWithDeletionByFilter { id: 1 })))");
+    debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, true, None), doc_del_fil()]), @"Some((DocumentOperation { method: ReplaceDocuments, allow_index_creation: true, primary_key: None, operation_ids: [0] }, true))");
-    debug_snapshot!(autobatch_from(true, None, [doc_imp(UpdateDocuments, true, None), doc_del_fil()]), @"Some((DocumentOperation { allow_index_creation: true, primary_key: None, operation_ids: [0] }, true, Some(DocumentOperationWithDeletionByFilter { id: 1 })))");
+    debug_snapshot!(autobatch_from(true, None, [doc_imp(UpdateDocuments, true, None), doc_del_fil()]), @"Some((DocumentOperation { method: UpdateDocuments, allow_index_creation: true, primary_key: None, operation_ids: [0] }, true))");
-    debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, false, None), doc_del_fil()]), @"Some((DocumentOperation { allow_index_creation: false, primary_key: None, operation_ids: [0] }, false, Some(DocumentOperationWithDeletionByFilter { id: 1 })))");
+    debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, false, None), doc_del_fil()]), @"Some((DocumentOperation { method: ReplaceDocuments, allow_index_creation: false, primary_key: None, operation_ids: [0] }, false))");
-    debug_snapshot!(autobatch_from(true, None, [doc_imp(UpdateDocuments, false, None), doc_del_fil()]), @"Some((DocumentOperation { allow_index_creation: false, primary_key: None, operation_ids: [0] }, false, Some(DocumentOperationWithDeletionByFilter { id: 1 })))");
+    debug_snapshot!(autobatch_from(true, None, [doc_imp(UpdateDocuments, false, None), doc_del_fil()]), @"Some((DocumentOperation { method: UpdateDocuments, allow_index_creation: false, primary_key: None, operation_ids: [0] }, false))");
-    debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, true, Some("catto")), doc_del_fil()]), @r###"Some((DocumentOperation { allow_index_creation: true, primary_key: Some("catto"), operation_ids: [0] }, true, Some(DocumentOperationWithDeletionByFilter { id: 1 })))"###);
+    debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, true, Some("catto")), doc_del_fil()]), @r###"Some((DocumentOperation { method: ReplaceDocuments, allow_index_creation: true, primary_key: Some("catto"), operation_ids: [0] }, true))"###);
-    debug_snapshot!(autobatch_from(true, None, [doc_imp(UpdateDocuments, true, Some("catto")), doc_del_fil()]), @r###"Some((DocumentOperation { allow_index_creation: true, primary_key: Some("catto"), operation_ids: [0] }, true, Some(DocumentOperationWithDeletionByFilter { id: 1 })))"###);
+    debug_snapshot!(autobatch_from(true, None, [doc_imp(UpdateDocuments, true, Some("catto")), doc_del_fil()]), @r###"Some((DocumentOperation { method: UpdateDocuments, allow_index_creation: true, primary_key: Some("catto"), operation_ids: [0] }, true))"###);
-    debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, false, Some("catto")), doc_del_fil()]), @r###"Some((DocumentOperation { allow_index_creation: false, primary_key: Some("catto"), operation_ids: [0] }, false, Some(DocumentOperationWithDeletionByFilter { id: 1 })))"###);
+    debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, false, Some("catto")), doc_del_fil()]), @r###"Some((DocumentOperation { method: ReplaceDocuments, allow_index_creation: false, primary_key: Some("catto"), operation_ids: [0] }, false))"###);
-    debug_snapshot!(autobatch_from(true, None, [doc_imp(UpdateDocuments, false, Some("catto")), doc_del_fil()]), @r###"Some((DocumentOperation { allow_index_creation: false, primary_key: Some("catto"), operation_ids: [0] }, false, Some(DocumentOperationWithDeletionByFilter { id: 1 })))"###);
+    debug_snapshot!(autobatch_from(true, None, [doc_imp(UpdateDocuments, false, Some("catto")), doc_del_fil()]), @r###"Some((DocumentOperation { method: UpdateDocuments, allow_index_creation: false, primary_key: Some("catto"), operation_ids: [0] }, false))"###);
-    debug_snapshot!(autobatch_from(false, None, [doc_imp(ReplaceDocuments, true, None), doc_del_fil()]), @"Some((DocumentOperation { allow_index_creation: true, primary_key: None, operation_ids: [0] }, true, Some(DocumentOperationWithDeletionByFilter { id: 1 })))");
+    debug_snapshot!(autobatch_from(false, None, [doc_imp(ReplaceDocuments, true, None), doc_del_fil()]), @"Some((DocumentOperation { method: ReplaceDocuments, allow_index_creation: true, primary_key: None, operation_ids: [0] }, true))");
-    debug_snapshot!(autobatch_from(false, None, [doc_imp(UpdateDocuments, true, None), doc_del_fil()]), @"Some((DocumentOperation { allow_index_creation: true, primary_key: None, operation_ids: [0] }, true, Some(DocumentOperationWithDeletionByFilter { id: 1 })))");
+    debug_snapshot!(autobatch_from(false, None, [doc_imp(UpdateDocuments, true, None), doc_del_fil()]), @"Some((DocumentOperation { method: UpdateDocuments, allow_index_creation: true, primary_key: None, operation_ids: [0] }, true))");
-    debug_snapshot!(autobatch_from(false, None, [doc_imp(ReplaceDocuments, false, None), doc_del_fil()]), @"Some((DocumentOperation { allow_index_creation: false, primary_key: None, operation_ids: [0] }, false, Some(DocumentOperationWithDeletionByFilter { id: 1 })))");
+    debug_snapshot!(autobatch_from(false, None, [doc_imp(ReplaceDocuments, false, None), doc_del_fil()]), @"Some((DocumentOperation { method: ReplaceDocuments, allow_index_creation: false, primary_key: None, operation_ids: [0] }, false))");
-    debug_snapshot!(autobatch_from(false, None, [doc_imp(UpdateDocuments, false, None), doc_del_fil()]), @"Some((DocumentOperation { allow_index_creation: false, primary_key: None, operation_ids: [0] }, false, Some(DocumentOperationWithDeletionByFilter { id: 1 })))");
+    debug_snapshot!(autobatch_from(false, None, [doc_imp(UpdateDocuments, false, None), doc_del_fil()]), @"Some((DocumentOperation { method: UpdateDocuments, allow_index_creation: false, primary_key: None, operation_ids: [0] }, false))");
-    debug_snapshot!(autobatch_from(false, None, [doc_imp(ReplaceDocuments, true, Some("catto")), doc_del_fil()]), @r###"Some((DocumentOperation { allow_index_creation: true, primary_key: Some("catto"), operation_ids: [0] }, true, Some(DocumentOperationWithDeletionByFilter { id: 1 })))"###);
+    debug_snapshot!(autobatch_from(false, None, [doc_imp(ReplaceDocuments, true, Some("catto")), doc_del_fil()]), @r###"Some((DocumentOperation { method: ReplaceDocuments, allow_index_creation: true, primary_key: Some("catto"), operation_ids: [0] }, true))"###);
-    debug_snapshot!(autobatch_from(false, None, [doc_imp(UpdateDocuments, true, Some("catto")), doc_del_fil()]), @r###"Some((DocumentOperation { allow_index_creation: true, primary_key: Some("catto"), operation_ids: [0] }, true, Some(DocumentOperationWithDeletionByFilter { id: 1 })))"###);
+    debug_snapshot!(autobatch_from(false, None, [doc_imp(UpdateDocuments, true, Some("catto")), doc_del_fil()]), @r###"Some((DocumentOperation { method: UpdateDocuments, allow_index_creation: true, primary_key: Some("catto"), operation_ids: [0] }, true))"###);
-    debug_snapshot!(autobatch_from(false, None, [doc_imp(ReplaceDocuments, false, Some("catto")), doc_del_fil()]), @r###"Some((DocumentOperation { allow_index_creation: false, primary_key: Some("catto"), operation_ids: [0] }, false, Some(DocumentOperationWithDeletionByFilter { id: 1 })))"###);
+    debug_snapshot!(autobatch_from(false, None, [doc_imp(ReplaceDocuments, false, Some("catto")), doc_del_fil()]), @r###"Some((DocumentOperation { method: ReplaceDocuments, allow_index_creation: false, primary_key: Some("catto"), operation_ids: [0] }, false))"###);
-    debug_snapshot!(autobatch_from(false, None, [doc_imp(UpdateDocuments, false, Some("catto")), doc_del_fil()]), @r###"Some((DocumentOperation { allow_index_creation: false, primary_key: Some("catto"), operation_ids: [0] }, false, Some(DocumentOperationWithDeletionByFilter { id: 1 })))"###);
+    debug_snapshot!(autobatch_from(false, None, [doc_imp(UpdateDocuments, false, Some("catto")), doc_del_fil()]), @r###"Some((DocumentOperation { method: UpdateDocuments, allow_index_creation: false, primary_key: Some("catto"), operation_ids: [0] }, false))"###);
     // And the other way around
-    debug_snapshot!(autobatch_from(true, None, [doc_del_fil(), doc_imp(ReplaceDocuments, true, None)]), @"Some((DocumentDeletion { deletion_ids: [0], includes_by_filter: true }, false, Some(DeletionByFilterWithDocumentOperation { id: 1 })))");
+    debug_snapshot!(autobatch_from(true, None, [doc_del_fil(), doc_imp(ReplaceDocuments, true, None)]), @"Some((DocumentDeletion { deletion_ids: [0], includes_by_filter: true }, false))");
-    debug_snapshot!(autobatch_from(true, None, [doc_del_fil(), doc_imp(UpdateDocuments, true, None)]), @"Some((DocumentDeletion { deletion_ids: [0], includes_by_filter: true }, false, Some(DeletionByFilterWithDocumentOperation { id: 1 })))");
+    debug_snapshot!(autobatch_from(true, None, [doc_del_fil(), doc_imp(UpdateDocuments, true, None)]), @"Some((DocumentDeletion { deletion_ids: [0], includes_by_filter: true }, false))");
-    debug_snapshot!(autobatch_from(true, None, [doc_del_fil(), doc_imp(ReplaceDocuments, false, None)]), @"Some((DocumentDeletion { deletion_ids: [0], includes_by_filter: true }, false, Some(DeletionByFilterWithDocumentOperation { id: 1 })))");
+    debug_snapshot!(autobatch_from(true, None, [doc_del_fil(), doc_imp(ReplaceDocuments, false, None)]), @"Some((DocumentDeletion { deletion_ids: [0], includes_by_filter: true }, false))");
-    debug_snapshot!(autobatch_from(true, None, [doc_del_fil(), doc_imp(UpdateDocuments, false, None)]), @"Some((DocumentDeletion { deletion_ids: [0], includes_by_filter: true }, false, Some(DeletionByFilterWithDocumentOperation { id: 1 })))");
+    debug_snapshot!(autobatch_from(true, None, [doc_del_fil(), doc_imp(UpdateDocuments, false, None)]), @"Some((DocumentDeletion { deletion_ids: [0], includes_by_filter: true }, false))");
-    debug_snapshot!(autobatch_from(true, None, [doc_del_fil(), doc_imp(ReplaceDocuments, true, Some("catto"))]), @"Some((DocumentDeletion { deletion_ids: [0], includes_by_filter: true }, false, Some(DeletionByFilterWithDocumentOperation { id: 1 })))");
+    debug_snapshot!(autobatch_from(true, None, [doc_del_fil(), doc_imp(ReplaceDocuments, true, Some("catto"))]), @"Some((DocumentDeletion { deletion_ids: [0], includes_by_filter: true }, false))");
-    debug_snapshot!(autobatch_from(true, None, [doc_del_fil(), doc_imp(UpdateDocuments, true, Some("catto"))]), @"Some((DocumentDeletion { deletion_ids: [0], includes_by_filter: true }, false, Some(DeletionByFilterWithDocumentOperation { id: 1 })))");
+    debug_snapshot!(autobatch_from(true, None, [doc_del_fil(), doc_imp(UpdateDocuments, true, Some("catto"))]), @"Some((DocumentDeletion { deletion_ids: [0], includes_by_filter: true }, false))");
-    debug_snapshot!(autobatch_from(true, None, [doc_del_fil(), doc_imp(ReplaceDocuments, false, Some("catto"))]), @"Some((DocumentDeletion { deletion_ids: [0], includes_by_filter: true }, false, Some(DeletionByFilterWithDocumentOperation { id: 1 })))");
+    debug_snapshot!(autobatch_from(true, None, [doc_del_fil(), doc_imp(ReplaceDocuments, false, Some("catto"))]), @"Some((DocumentDeletion { deletion_ids: [0], includes_by_filter: true }, false))");
-    debug_snapshot!(autobatch_from(true, None, [doc_del_fil(), doc_imp(UpdateDocuments, false, Some("catto"))]), @"Some((DocumentDeletion { deletion_ids: [0], includes_by_filter: true }, false, Some(DeletionByFilterWithDocumentOperation { id: 1 })))");
+    debug_snapshot!(autobatch_from(true, None, [doc_del_fil(), doc_imp(UpdateDocuments, false, Some("catto"))]), @"Some((DocumentDeletion { deletion_ids: [0], includes_by_filter: true }, false))");
-    debug_snapshot!(autobatch_from(false, None, [doc_del_fil(), doc_imp(ReplaceDocuments, false, None)]), @"Some((DocumentDeletion { deletion_ids: [0], includes_by_filter: true }, false, Some(DeletionByFilterWithDocumentOperation { id: 1 })))");
+    debug_snapshot!(autobatch_from(false, None, [doc_del_fil(), doc_imp(ReplaceDocuments, false, None)]), @"Some((DocumentDeletion { deletion_ids: [0], includes_by_filter: true }, false))");
-    debug_snapshot!(autobatch_from(false, None, [doc_del_fil(), doc_imp(UpdateDocuments, false, None)]), @"Some((DocumentDeletion { deletion_ids: [0], includes_by_filter: true }, false, Some(DeletionByFilterWithDocumentOperation { id: 1 })))");
+    debug_snapshot!(autobatch_from(false, None, [doc_del_fil(), doc_imp(UpdateDocuments, false, None)]), @"Some((DocumentDeletion { deletion_ids: [0], includes_by_filter: true }, false))");
-    debug_snapshot!(autobatch_from(false, None, [doc_del_fil(), doc_imp(ReplaceDocuments, false, Some("catto"))]), @"Some((DocumentDeletion { deletion_ids: [0], includes_by_filter: true }, false, Some(DeletionByFilterWithDocumentOperation { id: 1 })))");
+    debug_snapshot!(autobatch_from(false, None, [doc_del_fil(), doc_imp(ReplaceDocuments, false, Some("catto"))]), @"Some((DocumentDeletion { deletion_ids: [0], includes_by_filter: true }, false))");
-    debug_snapshot!(autobatch_from(false, None, [doc_del_fil(), doc_imp(UpdateDocuments, false, Some("catto"))]), @"Some((DocumentDeletion { deletion_ids: [0], includes_by_filter: true }, false, Some(DeletionByFilterWithDocumentOperation { id: 1 })))");
+    debug_snapshot!(autobatch_from(false, None, [doc_del_fil(), doc_imp(UpdateDocuments, false, Some("catto"))]), @"Some((DocumentDeletion { deletion_ids: [0], includes_by_filter: true }, false))");
 }

 #[test]
-fn simple_different_document_operations_autobatch_together() {
-    // addition and updates with deletion by filter can't batch together
+fn simple_document_operation_dont_autobatch_with_other() {
+    // addition, updates and deletion by filter can't batch together
-    debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, true, None), doc_imp(UpdateDocuments, true, None)]), @"Some((DocumentOperation { allow_index_creation: true, primary_key: None, operation_ids: [0, 1] }, true, None))");
+    debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, true, None), doc_imp(UpdateDocuments, true, None)]), @"Some((DocumentOperation { method: ReplaceDocuments, allow_index_creation: true, primary_key: None, operation_ids: [0] }, true))");
-    debug_snapshot!(autobatch_from(true, None, [doc_imp(UpdateDocuments, true, None), doc_imp(ReplaceDocuments, true, None)]), @"Some((DocumentOperation { allow_index_creation: true, primary_key: None, operation_ids: [0, 1] }, true, None))");
+    debug_snapshot!(autobatch_from(true, None, [doc_imp(UpdateDocuments, true, None), doc_imp(ReplaceDocuments, true, None)]), @"Some((DocumentOperation { method: UpdateDocuments, allow_index_creation: true, primary_key: None, operation_ids: [0] }, true))");
-    debug_snapshot!(autobatch_from(true, None, [doc_imp(UpdateDocuments, true, None), doc_del_fil()]), @"Some((DocumentOperation { allow_index_creation: true, primary_key: None, operation_ids: [0] }, true, Some(DocumentOperationWithDeletionByFilter { id: 1 })))");
+    debug_snapshot!(autobatch_from(true, None, [doc_imp(UpdateDocuments, true, None), doc_del_fil()]), @"Some((DocumentOperation { method: UpdateDocuments, allow_index_creation: true, primary_key: None, operation_ids: [0] }, true))");
-    debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, true, None), doc_del_fil()]), @"Some((DocumentOperation { allow_index_creation: true, primary_key: None, operation_ids: [0] }, true, Some(DocumentOperationWithDeletionByFilter { id: 1 })))");
+    debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, true, None), doc_del_fil()]), @"Some((DocumentOperation { method: ReplaceDocuments, allow_index_creation: true, primary_key: None, operation_ids: [0] }, true))");
-    debug_snapshot!(autobatch_from(true, None, [doc_del_fil(), doc_imp(UpdateDocuments, true, None)]), @"Some((DocumentDeletion { deletion_ids: [0], includes_by_filter: true }, false, Some(DeletionByFilterWithDocumentOperation { id: 1 })))");
+    debug_snapshot!(autobatch_from(true, None, [doc_del_fil(), doc_imp(UpdateDocuments, true, None)]), @"Some((DocumentDeletion { deletion_ids: [0], includes_by_filter: true }, false))");
-    debug_snapshot!(autobatch_from(true, None, [doc_del_fil(), doc_imp(ReplaceDocuments, true, None)]), @"Some((DocumentDeletion { deletion_ids: [0], includes_by_filter: true }, false, Some(DeletionByFilterWithDocumentOperation { id: 1 })))");
+    debug_snapshot!(autobatch_from(true, None, [doc_del_fil(), doc_imp(ReplaceDocuments, true, None)]), @"Some((DocumentDeletion { deletion_ids: [0], includes_by_filter: true }, false))");

-    debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, true, None), idx_create()]), @"Some((DocumentOperation { allow_index_creation: true, primary_key: None, operation_ids: [0] }, true, Some(TaskCannotBeBatched { kind: IndexCreation, id: 1 })))");
+    debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, true, None), idx_create()]), @"Some((DocumentOperation { method: ReplaceDocuments, allow_index_creation: true, primary_key: None, operation_ids: [0] }, true))");
-    debug_snapshot!(autobatch_from(true, None, [doc_imp(UpdateDocuments, true, None), idx_create()]), @"Some((DocumentOperation { allow_index_creation: true, primary_key: None, operation_ids: [0] }, true, Some(TaskCannotBeBatched { kind: IndexCreation, id: 1 })))");
+    debug_snapshot!(autobatch_from(true, None, [doc_imp(UpdateDocuments, true, None), idx_create()]), @"Some((DocumentOperation { method: UpdateDocuments, allow_index_creation: true, primary_key: None, operation_ids: [0] }, true))");
-    debug_snapshot!(autobatch_from(true, None, [doc_del(), idx_create()]), @"Some((DocumentDeletion { deletion_ids: [0], includes_by_filter: false }, false, Some(TaskCannotBeBatched { kind: IndexCreation, id: 1 })))");
+    debug_snapshot!(autobatch_from(true, None, [doc_del(), idx_create()]), @"Some((DocumentDeletion { deletion_ids: [0], includes_by_filter: false }, false))");
-    debug_snapshot!(autobatch_from(true, None, [doc_del_fil(), idx_create()]), @"Some((DocumentDeletion { deletion_ids: [0], includes_by_filter: true }, false, Some(TaskCannotBeBatched { kind: IndexCreation, id: 1 })))");
+    debug_snapshot!(autobatch_from(true, None, [doc_del_fil(), idx_create()]), @"Some((DocumentDeletion { deletion_ids: [0], includes_by_filter: true }, false))");

-    debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, true, None), idx_update()]), @"Some((DocumentOperation { allow_index_creation: true, primary_key: None, operation_ids: [0] }, true, Some(TaskCannotBeBatched { kind: IndexUpdate, id: 1 })))");
+    debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, true, None), idx_update()]), @"Some((DocumentOperation { method: ReplaceDocuments, allow_index_creation: true, primary_key: None, operation_ids: [0] }, true))");
-    debug_snapshot!(autobatch_from(true, None, [doc_imp(UpdateDocuments, true, None), idx_update()]), @"Some((DocumentOperation { allow_index_creation: true, primary_key: None, operation_ids: [0] }, true, Some(TaskCannotBeBatched { kind: IndexUpdate, id: 1 })))");
+    debug_snapshot!(autobatch_from(true, None, [doc_imp(UpdateDocuments, true, None), idx_update()]), @"Some((DocumentOperation { method: UpdateDocuments, allow_index_creation: true, primary_key: None, operation_ids: [0] }, true))");
-    debug_snapshot!(autobatch_from(true, None, [doc_del(), idx_update()]), @"Some((DocumentDeletion { deletion_ids: [0], includes_by_filter: false }, false, Some(TaskCannotBeBatched { kind: IndexUpdate, id: 1 })))");
+    debug_snapshot!(autobatch_from(true, None, [doc_del(), idx_update()]), @"Some((DocumentDeletion { deletion_ids: [0], includes_by_filter: false }, false))");
-    debug_snapshot!(autobatch_from(true, None, [doc_del_fil(), idx_update()]), @"Some((DocumentDeletion { deletion_ids: [0], includes_by_filter: true }, false, Some(TaskCannotBeBatched { kind: IndexUpdate, id: 1 })))");
+    debug_snapshot!(autobatch_from(true, None, [doc_del_fil(), idx_update()]), @"Some((DocumentDeletion { deletion_ids: [0], includes_by_filter: true }, false))");

-    debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, true, None), idx_swap()]), @"Some((DocumentOperation { allow_index_creation: true, primary_key: None, operation_ids: [0] }, true, Some(TaskCannotBeBatched { kind: IndexSwap, id: 1 })))");
+    debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, true, None), idx_swap()]), @"Some((DocumentOperation { method: ReplaceDocuments, allow_index_creation: true, primary_key: None, operation_ids: [0] }, true))");
-    debug_snapshot!(autobatch_from(true, None, [doc_imp(UpdateDocuments, true, None), idx_swap()]), @"Some((DocumentOperation { allow_index_creation: true, primary_key: None, operation_ids: [0] }, true, Some(TaskCannotBeBatched { kind: IndexSwap, id: 1 })))");
+    debug_snapshot!(autobatch_from(true, None, [doc_imp(UpdateDocuments, true, None), idx_swap()]), @"Some((DocumentOperation { method: UpdateDocuments, allow_index_creation: true, primary_key: None, operation_ids: [0] }, true))");
-    debug_snapshot!(autobatch_from(true, None, [doc_del(), idx_swap()]), @"Some((DocumentDeletion { deletion_ids: [0], includes_by_filter: false }, false, Some(TaskCannotBeBatched { kind: IndexSwap, id: 1 })))");
+    debug_snapshot!(autobatch_from(true, None, [doc_del(), idx_swap()]), @"Some((DocumentDeletion { deletion_ids: [0], includes_by_filter: false }, false))");
-    debug_snapshot!(autobatch_from(true, None, [doc_del_fil(), idx_swap()]), @"Some((DocumentDeletion { deletion_ids: [0], includes_by_filter: true }, false, Some(TaskCannotBeBatched { kind: IndexSwap, id: 1 })))");
+    debug_snapshot!(autobatch_from(true, None, [doc_del_fil(), idx_swap()]), @"Some((DocumentDeletion { deletion_ids: [0], includes_by_filter: true }, false))");
 }
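Note (editor's sketch, not part of the diff): the renamed test pins down the behavioral change above. With the `method` field back on DocumentOperation, a ReplaceDocuments import followed by an UpdateDocuments import stops batching at the first task instead of merging both into one operation. Using the test file's own helpers, that looks like:

    // Two imports with different methods: the batch keeps only task 0,
    // and task 1 is left for the next batch (helpers are the test file's own).
    let batch = autobatch_from(true, None, [
        doc_imp(ReplaceDocuments, true, None),
        doc_imp(UpdateDocuments, true, None),
    ]);
    // => Some((DocumentOperation { method: ReplaceDocuments, ..., operation_ids: [0] }, true))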
|
 #[test]
 fn document_addition_doesnt_batch_with_settings() {
     // simple case
-    debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, true, None), settings(true)]), @"Some((DocumentOperation { allow_index_creation: true, primary_key: None, operation_ids: [0] }, true, Some(DocumentOperationWithSettings { id: 1 })))");
+    debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, true, None), settings(true)]), @"Some((DocumentOperation { method: ReplaceDocuments, allow_index_creation: true, primary_key: None, operation_ids: [0] }, true))");
-    debug_snapshot!(autobatch_from(true, None, [doc_imp(UpdateDocuments, true, None), settings(true)]), @"Some((DocumentOperation { allow_index_creation: true, primary_key: None, operation_ids: [0] }, true, Some(DocumentOperationWithSettings { id: 1 })))");
+    debug_snapshot!(autobatch_from(true, None, [doc_imp(UpdateDocuments, true, None), settings(true)]), @"Some((DocumentOperation { method: UpdateDocuments, allow_index_creation: true, primary_key: None, operation_ids: [0] }, true))");

     // multiple settings and doc addition
-    debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, true, None), doc_imp(ReplaceDocuments, true, None), settings(true), settings(true)]), @"Some((DocumentOperation { allow_index_creation: true, primary_key: None, operation_ids: [0, 1] }, true, Some(DocumentOperationWithSettings { id: 2 })))");
+    debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, true, None), doc_imp(ReplaceDocuments, true, None), settings(true), settings(true)]), @"Some((DocumentOperation { method: ReplaceDocuments, allow_index_creation: true, primary_key: None, operation_ids: [0, 1] }, true))");
-    debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, true, None), doc_imp(ReplaceDocuments, true, None), settings(true), settings(true)]), @"Some((DocumentOperation { allow_index_creation: true, primary_key: None, operation_ids: [0, 1] }, true, Some(DocumentOperationWithSettings { id: 2 })))");
+    debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, true, None), doc_imp(ReplaceDocuments, true, None), settings(true), settings(true)]), @"Some((DocumentOperation { method: ReplaceDocuments, allow_index_creation: true, primary_key: None, operation_ids: [0, 1] }, true))");

     // addition and setting unordered
-    debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, true, None), settings(true), doc_imp(ReplaceDocuments, true, None), settings(true)]), @"Some((DocumentOperation { allow_index_creation: true, primary_key: None, operation_ids: [0] }, true, Some(DocumentOperationWithSettings { id: 1 })))");
+    debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, true, None), settings(true), doc_imp(ReplaceDocuments, true, None), settings(true)]), @"Some((DocumentOperation { method: ReplaceDocuments, allow_index_creation: true, primary_key: None, operation_ids: [0] }, true))");
-    debug_snapshot!(autobatch_from(true, None, [doc_imp(UpdateDocuments, true, None), settings(true), doc_imp(UpdateDocuments, true, None), settings(true)]), @"Some((DocumentOperation { allow_index_creation: true, primary_key: None, operation_ids: [0] }, true, Some(DocumentOperationWithSettings { id: 1 })))");
+    debug_snapshot!(autobatch_from(true, None, [doc_imp(UpdateDocuments, true, None), settings(true), doc_imp(UpdateDocuments, true, None), settings(true)]), @"Some((DocumentOperation { method: UpdateDocuments, allow_index_creation: true, primary_key: None, operation_ids: [0] }, true))");

     // Doesn't batch with other forbidden operations
-    debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, true, None), settings(true), doc_imp(UpdateDocuments, true, None)]), @"Some((DocumentOperation { allow_index_creation: true, primary_key: None, operation_ids: [0] }, true, Some(DocumentOperationWithSettings { id: 1 })))");
+    debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, true, None), settings(true), doc_imp(UpdateDocuments, true, None)]), @"Some((DocumentOperation { method: ReplaceDocuments, allow_index_creation: true, primary_key: None, operation_ids: [0] }, true))");
-    debug_snapshot!(autobatch_from(true, None, [doc_imp(UpdateDocuments, true, None), settings(true), doc_imp(ReplaceDocuments, true, None)]), @"Some((DocumentOperation { allow_index_creation: true, primary_key: None, operation_ids: [0] }, true, Some(DocumentOperationWithSettings { id: 1 })))");
+    debug_snapshot!(autobatch_from(true, None, [doc_imp(UpdateDocuments, true, None), settings(true), doc_imp(ReplaceDocuments, true, None)]), @"Some((DocumentOperation { method: UpdateDocuments, allow_index_creation: true, primary_key: None, operation_ids: [0] }, true))");
-    debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, true, None), settings(true), doc_del()]), @"Some((DocumentOperation { allow_index_creation: true, primary_key: None, operation_ids: [0] }, true, Some(DocumentOperationWithSettings { id: 1 })))");
+    debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, true, None), settings(true), doc_del()]), @"Some((DocumentOperation { method: ReplaceDocuments, allow_index_creation: true, primary_key: None, operation_ids: [0] }, true))");
-    debug_snapshot!(autobatch_from(true, None, [doc_imp(UpdateDocuments, true, None), settings(true), doc_del()]), @"Some((DocumentOperation { allow_index_creation: true, primary_key: None, operation_ids: [0] }, true, Some(DocumentOperationWithSettings { id: 1 })))");
+    debug_snapshot!(autobatch_from(true, None, [doc_imp(UpdateDocuments, true, None), settings(true), doc_del()]), @"Some((DocumentOperation { method: UpdateDocuments, allow_index_creation: true, primary_key: None, operation_ids: [0] }, true))");
-    debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, true, None), settings(true), idx_create()]), @"Some((DocumentOperation { allow_index_creation: true, primary_key: None, operation_ids: [0] }, true, Some(DocumentOperationWithSettings { id: 1 })))");
+    debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, true, None), settings(true), idx_create()]), @"Some((DocumentOperation { method: ReplaceDocuments, allow_index_creation: true, primary_key: None, operation_ids: [0] }, true))");
-    debug_snapshot!(autobatch_from(true, None, [doc_imp(UpdateDocuments, true, None), settings(true), idx_create()]), @"Some((DocumentOperation { allow_index_creation: true, primary_key: None, operation_ids: [0] }, true, Some(DocumentOperationWithSettings { id: 1 })))");
+    debug_snapshot!(autobatch_from(true, None, [doc_imp(UpdateDocuments, true, None), settings(true), idx_create()]), @"Some((DocumentOperation { method: UpdateDocuments, allow_index_creation: true, primary_key: None, operation_ids: [0] }, true))");
-    debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, true, None), settings(true), idx_update()]), @"Some((DocumentOperation { allow_index_creation: true, primary_key: None, operation_ids: [0] }, true, Some(DocumentOperationWithSettings { id: 1 })))");
+    debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, true, None), settings(true), idx_update()]), @"Some((DocumentOperation { method: ReplaceDocuments, allow_index_creation: true, primary_key: None, operation_ids: [0] }, true))");
-    debug_snapshot!(autobatch_from(true, None, [doc_imp(UpdateDocuments, true, None), settings(true), idx_update()]), @"Some((DocumentOperation { allow_index_creation: true, primary_key: None, operation_ids: [0] }, true, Some(DocumentOperationWithSettings { id: 1 })))");
+    debug_snapshot!(autobatch_from(true, None, [doc_imp(UpdateDocuments, true, None), settings(true), idx_update()]), @"Some((DocumentOperation { method: UpdateDocuments, allow_index_creation: true, primary_key: None, operation_ids: [0] }, true))");
|
||||||
debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, true, None), settings(true), idx_swap()]), @"Some((DocumentOperation { allow_index_creation: true, primary_key: None, operation_ids: [0] }, true, Some(DocumentOperationWithSettings { id: 1 })))");
|
debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, true, None), settings(true), idx_swap()]), @"Some((DocumentOperation { method: ReplaceDocuments, allow_index_creation: true, primary_key: None, operation_ids: [0] }, true))");
|
||||||
debug_snapshot!(autobatch_from(true, None, [doc_imp(UpdateDocuments, true, None), settings(true), idx_swap()]), @"Some((DocumentOperation { allow_index_creation: true, primary_key: None, operation_ids: [0] }, true, Some(DocumentOperationWithSettings { id: 1 })))");
|
debug_snapshot!(autobatch_from(true, None, [doc_imp(UpdateDocuments, true, None), settings(true), idx_swap()]), @"Some((DocumentOperation { method: UpdateDocuments, allow_index_creation: true, primary_key: None, operation_ids: [0] }, true))");
|
||||||
}
|
}
|
||||||
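The `debug_snapshot!` calls above are inline snapshot assertions (the project appears to wrap insta's `assert_debug_snapshot!`): the `-` side expects `autobatch_from` to return a triple that carries an optional stop reason, while the `+` side expects a plain `(BatchKind, bool)` pair whose `DocumentOperation` spells out its `method`. A minimal, self-contained sketch of how such an inline snapshot behaves, assuming only the `insta` crate (the struct below is invented for illustration):

```rust
// A minimal sketch of an insta inline snapshot; `BatchKind` here is a
// stand-in for illustration, not the scheduler's real type.
use insta::assert_debug_snapshot;

#[derive(Debug)]
struct BatchKind {
    allow_index_creation: bool,
    operation_ids: Vec<u32>,
}

#[test]
fn snapshot_shape() {
    let kind = BatchKind { allow_index_creation: true, operation_ids: vec![0, 1] };
    // The Debug rendering is compared verbatim against the string after `@`,
    // so changing the returned tuple's arity rewrites every snapshot in the file.
    assert_debug_snapshot!(kind, @"BatchKind { allow_index_creation: true, operation_ids: [0, 1] }");
}
```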

 #[test]
 fn clear_and_additions() {
     // these two doesn't need to batch
-    debug_snapshot!(autobatch_from(true, None, [doc_clr(), doc_imp(ReplaceDocuments, true, None)]), @"Some((DocumentClear { ids: [0] }, false, Some(DocumentOperationWithSettings { id: 1 })))");
-    debug_snapshot!(autobatch_from(true, None, [doc_clr(), doc_imp(UpdateDocuments, true, None)]), @"Some((DocumentClear { ids: [0] }, false, Some(DocumentOperationWithSettings { id: 1 })))");
+    debug_snapshot!(autobatch_from(true, None, [doc_clr(), doc_imp(ReplaceDocuments, true, None)]), @"Some((DocumentClear { ids: [0] }, false))");
+    debug_snapshot!(autobatch_from(true, None, [doc_clr(), doc_imp(UpdateDocuments, true, None)]), @"Some((DocumentClear { ids: [0] }, false))");

     // Basic use case
-    debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, true, None), doc_imp(ReplaceDocuments, true, None), doc_clr()]), @"Some((DocumentClear { ids: [0, 1, 2] }, true, None))");
-    debug_snapshot!(autobatch_from(true, None, [doc_imp(UpdateDocuments, true, None), doc_imp(UpdateDocuments, true, None), doc_clr()]), @"Some((DocumentClear { ids: [0, 1, 2] }, true, None))");
+    debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, true, None), doc_imp(ReplaceDocuments, true, None), doc_clr()]), @"Some((DocumentClear { ids: [0, 1, 2] }, true))");
+    debug_snapshot!(autobatch_from(true, None, [doc_imp(UpdateDocuments, true, None), doc_imp(UpdateDocuments, true, None), doc_clr()]), @"Some((DocumentClear { ids: [0, 1, 2] }, true))");

     // This batch kind doesn't mix with other document addition
-    debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, true, None), doc_imp(ReplaceDocuments, true, None), doc_clr(), doc_imp(ReplaceDocuments, true, None)]), @"Some((DocumentClear { ids: [0, 1, 2] }, true, Some(DocumentOperationWithSettings { id: 3 })))");
-    debug_snapshot!(autobatch_from(true, None, [doc_imp(UpdateDocuments, true, None), doc_imp(UpdateDocuments, true, None), doc_clr(), doc_imp(UpdateDocuments, true, None)]), @"Some((DocumentClear { ids: [0, 1, 2] }, true, Some(DocumentOperationWithSettings { id: 3 })))");
+    debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, true, None), doc_imp(ReplaceDocuments, true, None), doc_clr(), doc_imp(ReplaceDocuments, true, None)]), @"Some((DocumentClear { ids: [0, 1, 2] }, true))");
+    debug_snapshot!(autobatch_from(true, None, [doc_imp(UpdateDocuments, true, None), doc_imp(UpdateDocuments, true, None), doc_clr(), doc_imp(UpdateDocuments, true, None)]), @"Some((DocumentClear { ids: [0, 1, 2] }, true))");

     // But you can batch multiple clear together
-    debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, true, None), doc_imp(ReplaceDocuments, true, None), doc_clr(), doc_clr(), doc_clr()]), @"Some((DocumentClear { ids: [0, 1, 2, 3, 4] }, true, None))");
-    debug_snapshot!(autobatch_from(true, None, [doc_imp(UpdateDocuments, true, None), doc_imp(UpdateDocuments, true, None), doc_clr(), doc_clr(), doc_clr()]), @"Some((DocumentClear { ids: [0, 1, 2, 3, 4] }, true, None))");
+    debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, true, None), doc_imp(ReplaceDocuments, true, None), doc_clr(), doc_clr(), doc_clr()]), @"Some((DocumentClear { ids: [0, 1, 2, 3, 4] }, true))");
+    debug_snapshot!(autobatch_from(true, None, [doc_imp(UpdateDocuments, true, None), doc_imp(UpdateDocuments, true, None), doc_clr(), doc_clr(), doc_clr()]), @"Some((DocumentClear { ids: [0, 1, 2, 3, 4] }, true))");
 }

 #[test]
 fn clear_and_additions_and_settings() {
     // A clear don't need to autobatch the settings that happens AFTER there is no documents
-    debug_snapshot!(autobatch_from(true, None, [doc_clr(), settings(true)]), @"Some((DocumentClear { ids: [0] }, false, Some(DocumentOperationWithSettings { id: 1 })))");
+    debug_snapshot!(autobatch_from(true, None, [doc_clr(), settings(true)]), @"Some((DocumentClear { ids: [0] }, false))");

-    debug_snapshot!(autobatch_from(true, None, [settings(true), doc_clr(), settings(true)]), @"Some((ClearAndSettings { other: [1], allow_index_creation: true, settings_ids: [0, 2] }, true, None))");
-    debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, true, None), settings(true), doc_clr()]), @"Some((DocumentOperation { allow_index_creation: true, primary_key: None, operation_ids: [0] }, true, Some(DocumentOperationWithSettings { id: 1 })))");
-    debug_snapshot!(autobatch_from(true, None, [doc_imp(UpdateDocuments, true, None), settings(true), doc_clr()]), @"Some((DocumentOperation { allow_index_creation: true, primary_key: None, operation_ids: [0] }, true, Some(DocumentOperationWithSettings { id: 1 })))");
+    debug_snapshot!(autobatch_from(true, None, [settings(true), doc_clr(), settings(true)]), @"Some((ClearAndSettings { other: [1], allow_index_creation: true, settings_ids: [0, 2] }, true))");
+    debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, true, None), settings(true), doc_clr()]), @"Some((DocumentOperation { method: ReplaceDocuments, allow_index_creation: true, primary_key: None, operation_ids: [0] }, true))");
+    debug_snapshot!(autobatch_from(true, None, [doc_imp(UpdateDocuments, true, None), settings(true), doc_clr()]), @"Some((DocumentOperation { method: UpdateDocuments, allow_index_creation: true, primary_key: None, operation_ids: [0] }, true))");
 }

 #[test]
 fn anything_and_index_deletion() {
     // The `IndexDeletion` doesn't batch with anything that happens AFTER.
-    debug_snapshot!(autobatch_from(true, None, [idx_del(), doc_imp(ReplaceDocuments, true, None)]), @"Some((IndexDeletion { ids: [0] }, false, Some(IndexDeletion { id: 0 })))");
-    debug_snapshot!(autobatch_from(true, None, [idx_del(), doc_imp(UpdateDocuments, true, None)]), @"Some((IndexDeletion { ids: [0] }, false, Some(IndexDeletion { id: 0 })))");
-    debug_snapshot!(autobatch_from(true, None, [idx_del(), doc_imp(ReplaceDocuments, false, None)]), @"Some((IndexDeletion { ids: [0] }, false, Some(IndexDeletion { id: 0 })))");
-    debug_snapshot!(autobatch_from(true, None, [idx_del(), doc_imp(UpdateDocuments, false, None)]), @"Some((IndexDeletion { ids: [0] }, false, Some(IndexDeletion { id: 0 })))");
-    debug_snapshot!(autobatch_from(true, None, [idx_del(), doc_del()]), @"Some((IndexDeletion { ids: [0] }, false, Some(IndexDeletion { id: 0 })))");
-    debug_snapshot!(autobatch_from(true, None, [idx_del(), doc_del_fil()]), @"Some((IndexDeletion { ids: [0] }, false, Some(IndexDeletion { id: 0 })))");
-    debug_snapshot!(autobatch_from(true, None, [idx_del(), doc_clr()]), @"Some((IndexDeletion { ids: [0] }, false, Some(IndexDeletion { id: 0 })))");
-    debug_snapshot!(autobatch_from(true, None, [idx_del(), settings(true)]), @"Some((IndexDeletion { ids: [0] }, false, Some(IndexDeletion { id: 0 })))");
-    debug_snapshot!(autobatch_from(true, None, [idx_del(), settings(false)]), @"Some((IndexDeletion { ids: [0] }, false, Some(IndexDeletion { id: 0 })))");
+    debug_snapshot!(autobatch_from(true, None, [idx_del(), doc_imp(ReplaceDocuments, true, None)]), @"Some((IndexDeletion { ids: [0] }, false))");
+    debug_snapshot!(autobatch_from(true, None, [idx_del(), doc_imp(UpdateDocuments, true, None)]), @"Some((IndexDeletion { ids: [0] }, false))");
+    debug_snapshot!(autobatch_from(true, None, [idx_del(), doc_imp(ReplaceDocuments, false, None)]), @"Some((IndexDeletion { ids: [0] }, false))");
+    debug_snapshot!(autobatch_from(true, None, [idx_del(), doc_imp(UpdateDocuments, false, None)]), @"Some((IndexDeletion { ids: [0] }, false))");
+    debug_snapshot!(autobatch_from(true, None, [idx_del(), doc_del()]), @"Some((IndexDeletion { ids: [0] }, false))");
+    debug_snapshot!(autobatch_from(true, None, [idx_del(), doc_del_fil()]), @"Some((IndexDeletion { ids: [0] }, false))");
+    debug_snapshot!(autobatch_from(true, None, [idx_del(), doc_clr()]), @"Some((IndexDeletion { ids: [0] }, false))");
+    debug_snapshot!(autobatch_from(true, None, [idx_del(), settings(true)]), @"Some((IndexDeletion { ids: [0] }, false))");
+    debug_snapshot!(autobatch_from(true, None, [idx_del(), settings(false)]), @"Some((IndexDeletion { ids: [0] }, false))");

-    debug_snapshot!(autobatch_from(false,None, [idx_del(), doc_imp(ReplaceDocuments, true, None)]), @"Some((IndexDeletion { ids: [0] }, false, Some(IndexDeletion { id: 0 })))");
-    debug_snapshot!(autobatch_from(false,None, [idx_del(), doc_imp(UpdateDocuments, true, None)]), @"Some((IndexDeletion { ids: [0] }, false, Some(IndexDeletion { id: 0 })))");
-    debug_snapshot!(autobatch_from(false,None, [idx_del(), doc_imp(ReplaceDocuments, false, None)]), @"Some((IndexDeletion { ids: [0] }, false, Some(IndexDeletion { id: 0 })))");
-    debug_snapshot!(autobatch_from(false,None, [idx_del(), doc_imp(UpdateDocuments, false, None)]), @"Some((IndexDeletion { ids: [0] }, false, Some(IndexDeletion { id: 0 })))");
-    debug_snapshot!(autobatch_from(false,None, [idx_del(), doc_del()]), @"Some((IndexDeletion { ids: [0] }, false, Some(IndexDeletion { id: 0 })))");
-    debug_snapshot!(autobatch_from(false,None, [idx_del(), doc_del_fil()]), @"Some((IndexDeletion { ids: [0] }, false, Some(IndexDeletion { id: 0 })))");
-    debug_snapshot!(autobatch_from(false,None, [idx_del(), doc_clr()]), @"Some((IndexDeletion { ids: [0] }, false, Some(IndexDeletion { id: 0 })))");
-    debug_snapshot!(autobatch_from(false,None, [idx_del(), settings(true)]), @"Some((IndexDeletion { ids: [0] }, false, Some(IndexDeletion { id: 0 })))");
-    debug_snapshot!(autobatch_from(false,None, [idx_del(), settings(false)]), @"Some((IndexDeletion { ids: [0] }, false, Some(IndexDeletion { id: 0 })))");
+    debug_snapshot!(autobatch_from(false,None, [idx_del(), doc_imp(ReplaceDocuments, true, None)]), @"Some((IndexDeletion { ids: [0] }, false))");
+    debug_snapshot!(autobatch_from(false,None, [idx_del(), doc_imp(UpdateDocuments, true, None)]), @"Some((IndexDeletion { ids: [0] }, false))");
+    debug_snapshot!(autobatch_from(false,None, [idx_del(), doc_imp(ReplaceDocuments, false, None)]), @"Some((IndexDeletion { ids: [0] }, false))");
+    debug_snapshot!(autobatch_from(false,None, [idx_del(), doc_imp(UpdateDocuments, false, None)]), @"Some((IndexDeletion { ids: [0] }, false))");
+    debug_snapshot!(autobatch_from(false,None, [idx_del(), doc_del()]), @"Some((IndexDeletion { ids: [0] }, false))");
+    debug_snapshot!(autobatch_from(false,None, [idx_del(), doc_del_fil()]), @"Some((IndexDeletion { ids: [0] }, false))");
+    debug_snapshot!(autobatch_from(false,None, [idx_del(), doc_clr()]), @"Some((IndexDeletion { ids: [0] }, false))");
+    debug_snapshot!(autobatch_from(false,None, [idx_del(), settings(true)]), @"Some((IndexDeletion { ids: [0] }, false))");
+    debug_snapshot!(autobatch_from(false,None, [idx_del(), settings(false)]), @"Some((IndexDeletion { ids: [0] }, false))");

     // The index deletion can accept almost any type of `BatchKind` and transform it to an `IndexDeletion`.
     // First, the basic cases
-    debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, true, None), idx_del()]), @"Some((IndexDeletion { ids: [0, 1] }, true, Some(IndexDeletion { id: 1 })))");
-    debug_snapshot!(autobatch_from(true, None, [doc_imp(UpdateDocuments, true, None), idx_del()]), @"Some((IndexDeletion { ids: [0, 1] }, true, Some(IndexDeletion { id: 1 })))");
-    debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, false, None), idx_del()]), @"Some((IndexDeletion { ids: [0, 1] }, false, Some(IndexDeletion { id: 1 })))");
-    debug_snapshot!(autobatch_from(true, None, [doc_imp(UpdateDocuments, false, None), idx_del()]), @"Some((IndexDeletion { ids: [0, 1] }, false, Some(IndexDeletion { id: 1 })))");
-    debug_snapshot!(autobatch_from(true, None, [doc_del(), idx_del()]), @"Some((IndexDeletion { ids: [0, 1] }, false, Some(IndexDeletion { id: 1 })))");
-    debug_snapshot!(autobatch_from(true, None, [doc_del_fil(), idx_del()]), @"Some((IndexDeletion { ids: [0, 1] }, false, Some(IndexDeletion { id: 1 })))");
-    debug_snapshot!(autobatch_from(true, None, [doc_clr(), idx_del()]), @"Some((IndexDeletion { ids: [0, 1] }, false, Some(IndexDeletion { id: 1 })))");
-    debug_snapshot!(autobatch_from(true, None, [settings(true), idx_del()]), @"Some((IndexDeletion { ids: [0, 1] }, true, Some(IndexDeletion { id: 1 })))");
-    debug_snapshot!(autobatch_from(true, None, [settings(false), idx_del()]), @"Some((IndexDeletion { ids: [0, 1] }, false, Some(IndexDeletion { id: 1 })))");
+    debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, true, None), idx_del()]), @"Some((IndexDeletion { ids: [0, 1] }, true))");
+    debug_snapshot!(autobatch_from(true, None, [doc_imp(UpdateDocuments, true, None), idx_del()]), @"Some((IndexDeletion { ids: [0, 1] }, true))");
+    debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, false, None), idx_del()]), @"Some((IndexDeletion { ids: [0, 1] }, false))");
+    debug_snapshot!(autobatch_from(true, None, [doc_imp(UpdateDocuments, false, None), idx_del()]), @"Some((IndexDeletion { ids: [0, 1] }, false))");
+    debug_snapshot!(autobatch_from(true, None, [doc_del(), idx_del()]), @"Some((IndexDeletion { ids: [0, 1] }, false))");
+    debug_snapshot!(autobatch_from(true, None, [doc_del_fil(), idx_del()]), @"Some((IndexDeletion { ids: [0, 1] }, false))");
+    debug_snapshot!(autobatch_from(true, None, [doc_clr(), idx_del()]), @"Some((IndexDeletion { ids: [0, 1] }, false))");
+    debug_snapshot!(autobatch_from(true, None, [settings(true), idx_del()]), @"Some((IndexDeletion { ids: [0, 1] }, true))");
+    debug_snapshot!(autobatch_from(true, None, [settings(false), idx_del()]), @"Some((IndexDeletion { ids: [0, 1] }, false))");

-    debug_snapshot!(autobatch_from(false,None, [doc_imp(ReplaceDocuments, true, None), idx_del()]), @"Some((IndexDeletion { ids: [0, 1] }, true, Some(IndexDeletion { id: 1 })))");
-    debug_snapshot!(autobatch_from(false,None, [doc_imp(UpdateDocuments, true, None), idx_del()]), @"Some((IndexDeletion { ids: [0, 1] }, true, Some(IndexDeletion { id: 1 })))");
-    debug_snapshot!(autobatch_from(false,None, [doc_imp(ReplaceDocuments, false, None), idx_del()]), @"Some((IndexDeletion { ids: [0, 1] }, false, Some(IndexDeletion { id: 1 })))");
-    debug_snapshot!(autobatch_from(false,None, [doc_imp(UpdateDocuments, false, None), idx_del()]), @"Some((IndexDeletion { ids: [0, 1] }, false, Some(IndexDeletion { id: 1 })))");
-    debug_snapshot!(autobatch_from(false,None, [doc_del(), idx_del()]), @"Some((IndexDeletion { ids: [0, 1] }, false, Some(IndexDeletion { id: 1 })))");
-    debug_snapshot!(autobatch_from(false,None, [doc_del_fil(), idx_del()]), @"Some((IndexDeletion { ids: [0, 1] }, false, Some(IndexDeletion { id: 1 })))");
-    debug_snapshot!(autobatch_from(false,None, [doc_clr(), idx_del()]), @"Some((IndexDeletion { ids: [0, 1] }, false, Some(IndexDeletion { id: 1 })))");
-    debug_snapshot!(autobatch_from(false,None, [settings(true), idx_del()]), @"Some((IndexDeletion { ids: [0, 1] }, true, Some(IndexDeletion { id: 1 })))");
-    debug_snapshot!(autobatch_from(false,None, [settings(false), idx_del()]), @"Some((IndexDeletion { ids: [0, 1] }, false, Some(IndexDeletion { id: 1 })))");
+    debug_snapshot!(autobatch_from(false,None, [doc_imp(ReplaceDocuments, true, None), idx_del()]), @"Some((IndexDeletion { ids: [0, 1] }, true))");
+    debug_snapshot!(autobatch_from(false,None, [doc_imp(UpdateDocuments, true, None), idx_del()]), @"Some((IndexDeletion { ids: [0, 1] }, true))");
+    debug_snapshot!(autobatch_from(false,None, [doc_imp(ReplaceDocuments, false, None), idx_del()]), @"Some((IndexDeletion { ids: [0, 1] }, false))");
+    debug_snapshot!(autobatch_from(false,None, [doc_imp(UpdateDocuments, false, None), idx_del()]), @"Some((IndexDeletion { ids: [0, 1] }, false))");
+    debug_snapshot!(autobatch_from(false,None, [doc_del(), idx_del()]), @"Some((IndexDeletion { ids: [0, 1] }, false))");
+    debug_snapshot!(autobatch_from(false,None, [doc_del_fil(), idx_del()]), @"Some((IndexDeletion { ids: [0, 1] }, false))");
+    debug_snapshot!(autobatch_from(false,None, [doc_clr(), idx_del()]), @"Some((IndexDeletion { ids: [0, 1] }, false))");
+    debug_snapshot!(autobatch_from(false,None, [settings(true), idx_del()]), @"Some((IndexDeletion { ids: [0, 1] }, true))");
+    debug_snapshot!(autobatch_from(false,None, [settings(false), idx_del()]), @"Some((IndexDeletion { ids: [0, 1] }, false))");
 }
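Both sides of the diff encode the same absorption rule here: an `IndexDeletion` swallows every task enqueued before it on the same index (their ids are merged into `ids`), while nothing enqueued after it joins the batch. A toy model of that rule, with hypothetical stand-ins for the scheduler's types:

```rust
// Toy model of the IndexDeletion absorption rule; `TaskId` and the return
// shape are simplified stand-ins, not the scheduler's real types.
type TaskId = u32;

fn absorb_into_index_deletion(prior: &[TaskId], idx_del: TaskId) -> Vec<TaskId> {
    // Every earlier task on the index is folded into the deletion batch...
    let mut ids = prior.to_vec();
    ids.push(idx_del);
    ids // ...and anything enqueued afterwards starts a fresh batch.
}

#[test]
fn matches_the_snapshots() {
    // `[doc_imp(..), idx_del()]` above becomes `IndexDeletion { ids: [0, 1] }`.
    assert_eq!(absorb_into_index_deletion(&[0], 1), vec![0, 1]);
    // `[idx_del(), doc_imp(..)]` leaves only `IndexDeletion { ids: [0] }`.
    assert_eq!(absorb_into_index_deletion(&[], 0), vec![0]);
}
```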

 #[test]
 fn allowed_and_disallowed_index_creation() {
     // `DocumentImport` can't be mixed with those disallowed to do so except if the index already exists.
-    debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, false, None), doc_imp(ReplaceDocuments, true, None)]), @"Some((DocumentOperation { allow_index_creation: false, primary_key: None, operation_ids: [0, 1] }, false, None))");
-    debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, true, None), doc_imp(ReplaceDocuments, true, None)]), @"Some((DocumentOperation { allow_index_creation: true, primary_key: None, operation_ids: [0, 1] }, true, None))");
-    debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, false, None), doc_imp(ReplaceDocuments, false, None)]), @"Some((DocumentOperation { allow_index_creation: false, primary_key: None, operation_ids: [0, 1] }, false, None))");
-    debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, true, None), settings(true)]), @"Some((DocumentOperation { allow_index_creation: true, primary_key: None, operation_ids: [0] }, true, Some(DocumentOperationWithSettings { id: 1 })))");
-    debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, false, None), settings(true)]), @"Some((DocumentOperation { allow_index_creation: false, primary_key: None, operation_ids: [0] }, false, Some(DocumentOperationWithSettings { id: 1 })))");
+    debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, false, None), doc_imp(ReplaceDocuments, true, None)]), @"Some((DocumentOperation { method: ReplaceDocuments, allow_index_creation: false, primary_key: None, operation_ids: [0, 1] }, false))");
+    debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, true, None), doc_imp(ReplaceDocuments, true, None)]), @"Some((DocumentOperation { method: ReplaceDocuments, allow_index_creation: true, primary_key: None, operation_ids: [0, 1] }, true))");
+    debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, false, None), doc_imp(ReplaceDocuments, false, None)]), @"Some((DocumentOperation { method: ReplaceDocuments, allow_index_creation: false, primary_key: None, operation_ids: [0, 1] }, false))");
+    debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, true, None), settings(true)]), @"Some((DocumentOperation { method: ReplaceDocuments, allow_index_creation: true, primary_key: None, operation_ids: [0] }, true))");
+    debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, false, None), settings(true)]), @"Some((DocumentOperation { method: ReplaceDocuments, allow_index_creation: false, primary_key: None, operation_ids: [0] }, false))");

-    debug_snapshot!(autobatch_from(false,None, [doc_imp(ReplaceDocuments, false, None), doc_imp(ReplaceDocuments, true, None)]), @"Some((DocumentOperation { allow_index_creation: false, primary_key: None, operation_ids: [0] }, false, Some(IndexCreationMismatch { id: 1 })))");
-    debug_snapshot!(autobatch_from(false,None, [doc_imp(ReplaceDocuments, true, None), doc_imp(ReplaceDocuments, true, None)]), @"Some((DocumentOperation { allow_index_creation: true, primary_key: None, operation_ids: [0, 1] }, true, None))");
-    debug_snapshot!(autobatch_from(false,None, [doc_imp(ReplaceDocuments, false, None), doc_imp(ReplaceDocuments, false, None)]), @"Some((DocumentOperation { allow_index_creation: false, primary_key: None, operation_ids: [0, 1] }, false, None))");
-    debug_snapshot!(autobatch_from(false,None, [doc_imp(ReplaceDocuments, true, None), settings(true)]), @"Some((DocumentOperation { allow_index_creation: true, primary_key: None, operation_ids: [0] }, true, Some(DocumentOperationWithSettings { id: 1 })))");
-    debug_snapshot!(autobatch_from(false,None, [doc_imp(ReplaceDocuments, false, None), settings(true)]), @"Some((DocumentOperation { allow_index_creation: false, primary_key: None, operation_ids: [0] }, false, Some(IndexCreationMismatch { id: 1 })))");
+    debug_snapshot!(autobatch_from(false,None, [doc_imp(ReplaceDocuments, false, None), doc_imp(ReplaceDocuments, true, None)]), @"Some((DocumentOperation { method: ReplaceDocuments, allow_index_creation: false, primary_key: None, operation_ids: [0] }, false))");
+    debug_snapshot!(autobatch_from(false,None, [doc_imp(ReplaceDocuments, true, None), doc_imp(ReplaceDocuments, true, None)]), @"Some((DocumentOperation { method: ReplaceDocuments, allow_index_creation: true, primary_key: None, operation_ids: [0, 1] }, true))");
+    debug_snapshot!(autobatch_from(false,None, [doc_imp(ReplaceDocuments, false, None), doc_imp(ReplaceDocuments, false, None)]), @"Some((DocumentOperation { method: ReplaceDocuments, allow_index_creation: false, primary_key: None, operation_ids: [0, 1] }, false))");
+    debug_snapshot!(autobatch_from(false,None, [doc_imp(ReplaceDocuments, true, None), settings(true)]), @"Some((DocumentOperation { method: ReplaceDocuments, allow_index_creation: true, primary_key: None, operation_ids: [0] }, true))");
+    debug_snapshot!(autobatch_from(false,None, [doc_imp(ReplaceDocuments, false, None), settings(true)]), @"Some((DocumentOperation { method: ReplaceDocuments, allow_index_creation: false, primary_key: None, operation_ids: [0] }, false))");

     // batch deletion and addition
-    debug_snapshot!(autobatch_from(false, None, [doc_del(), doc_imp(ReplaceDocuments, true, Some("catto"))]), @"Some((DocumentDeletion { deletion_ids: [0], includes_by_filter: false }, false, Some(IndexCreationMismatch { id: 1 })))");
-    debug_snapshot!(autobatch_from(false, None, [doc_del(), doc_imp(UpdateDocuments, true, Some("catto"))]), @"Some((DocumentDeletion { deletion_ids: [0], includes_by_filter: false }, false, Some(IndexCreationMismatch { id: 1 })))");
-    debug_snapshot!(autobatch_from(false, None, [doc_del(), doc_imp(ReplaceDocuments, true, None)]), @"Some((DocumentDeletion { deletion_ids: [0], includes_by_filter: false }, false, Some(IndexCreationMismatch { id: 1 })))");
-    debug_snapshot!(autobatch_from(false, None, [doc_del(), doc_imp(UpdateDocuments, true, None)]), @"Some((DocumentDeletion { deletion_ids: [0], includes_by_filter: false }, false, Some(IndexCreationMismatch { id: 1 })))");
+    debug_snapshot!(autobatch_from(false, None, [doc_del(), doc_imp(ReplaceDocuments, true, Some("catto"))]), @"Some((DocumentDeletion { deletion_ids: [0], includes_by_filter: false }, false))");
+    debug_snapshot!(autobatch_from(false, None, [doc_del(), doc_imp(UpdateDocuments, true, Some("catto"))]), @"Some((DocumentDeletion { deletion_ids: [0], includes_by_filter: false }, false))");
+    debug_snapshot!(autobatch_from(false, None, [doc_del(), doc_imp(ReplaceDocuments, true, None)]), @"Some((DocumentDeletion { deletion_ids: [0], includes_by_filter: false }, false))");
+    debug_snapshot!(autobatch_from(false, None, [doc_del(), doc_imp(UpdateDocuments, true, None)]), @"Some((DocumentDeletion { deletion_ids: [0], includes_by_filter: false }, false))");
 }

 #[test]
 fn autobatch_primary_key() {
     // ==> If I have a pk
     // With a single update
-    debug_snapshot!(autobatch_from(true, Some("id"), [doc_imp(ReplaceDocuments, true, None)]), @"Some((DocumentOperation { allow_index_creation: true, primary_key: None, operation_ids: [0] }, true, None))");
-    debug_snapshot!(autobatch_from(true, Some("id"), [doc_imp(ReplaceDocuments, true, Some("id"))]), @r###"Some((DocumentOperation { allow_index_creation: true, primary_key: Some("id"), operation_ids: [0] }, true, None))"###);
-    debug_snapshot!(autobatch_from(true, Some("id"), [doc_imp(ReplaceDocuments, true, Some("other"))]), @r###"Some((DocumentOperation { allow_index_creation: true, primary_key: Some("other"), operation_ids: [0] }, true, Some(PrimaryKeyIndexMismatch { id: 0, in_index: "id", in_task: "other" })))"###);
+    debug_snapshot!(autobatch_from(true, Some("id"), [doc_imp(ReplaceDocuments, true, None)]), @"Some((DocumentOperation { method: ReplaceDocuments, allow_index_creation: true, primary_key: None, operation_ids: [0] }, true))");
+    debug_snapshot!(autobatch_from(true, Some("id"), [doc_imp(ReplaceDocuments, true, Some("id"))]), @r###"Some((DocumentOperation { method: ReplaceDocuments, allow_index_creation: true, primary_key: Some("id"), operation_ids: [0] }, true))"###);
+    debug_snapshot!(autobatch_from(true, Some("id"), [doc_imp(ReplaceDocuments, true, Some("other"))]), @r###"Some((DocumentOperation { method: ReplaceDocuments, allow_index_creation: true, primary_key: Some("other"), operation_ids: [0] }, true))"###);

     // With a multiple updates
-    debug_snapshot!(autobatch_from(true, Some("id"), [doc_imp(ReplaceDocuments, true, None), doc_imp(ReplaceDocuments, true, None)]), @"Some((DocumentOperation { allow_index_creation: true, primary_key: None, operation_ids: [0, 1] }, true, None))");
-    debug_snapshot!(autobatch_from(true, Some("id"), [doc_imp(ReplaceDocuments, true, None), doc_imp(ReplaceDocuments, true, Some("id"))]), @r###"Some((DocumentOperation { allow_index_creation: true, primary_key: Some("id"), operation_ids: [0, 1] }, true, None))"###);
-    debug_snapshot!(autobatch_from(true, Some("id"), [doc_imp(ReplaceDocuments, true, None), doc_imp(ReplaceDocuments, true, Some("id")), doc_imp(ReplaceDocuments, true, None)]), @r###"Some((DocumentOperation { allow_index_creation: true, primary_key: Some("id"), operation_ids: [0, 1, 2] }, true, None))"###);
-    debug_snapshot!(autobatch_from(true, Some("id"), [doc_imp(ReplaceDocuments, true, None), doc_imp(ReplaceDocuments, true, Some("other"))]), @r###"Some((DocumentOperation { allow_index_creation: true, primary_key: None, operation_ids: [0] }, true, Some(PrimaryKeyMismatch { id: 1, reason: TaskPrimaryKeyDifferFromIndexPrimaryKey { task_pk: "other", index_pk: "id" } })))"###);
-    debug_snapshot!(autobatch_from(true, Some("id"), [doc_imp(ReplaceDocuments, true, None), doc_imp(ReplaceDocuments, true, Some("other")), doc_imp(ReplaceDocuments, true, None)]), @r###"Some((DocumentOperation { allow_index_creation: true, primary_key: None, operation_ids: [0] }, true, Some(PrimaryKeyMismatch { id: 1, reason: TaskPrimaryKeyDifferFromIndexPrimaryKey { task_pk: "other", index_pk: "id" } })))"###);
-    debug_snapshot!(autobatch_from(true, Some("id"), [doc_imp(ReplaceDocuments, true, None), doc_imp(ReplaceDocuments, true, Some("other")), doc_imp(ReplaceDocuments, true, Some("id"))]), @r###"Some((DocumentOperation { allow_index_creation: true, primary_key: None, operation_ids: [0] }, true, Some(PrimaryKeyMismatch { id: 1, reason: TaskPrimaryKeyDifferFromIndexPrimaryKey { task_pk: "other", index_pk: "id" } })))"###);
+    debug_snapshot!(autobatch_from(true, Some("id"), [doc_imp(ReplaceDocuments, true, None), doc_imp(ReplaceDocuments, true, None)]), @"Some((DocumentOperation { method: ReplaceDocuments, allow_index_creation: true, primary_key: None, operation_ids: [0, 1] }, true))");
+    debug_snapshot!(autobatch_from(true, Some("id"), [doc_imp(ReplaceDocuments, true, None), doc_imp(ReplaceDocuments, true, Some("id"))]), @r###"Some((DocumentOperation { method: ReplaceDocuments, allow_index_creation: true, primary_key: Some("id"), operation_ids: [0, 1] }, true))"###);
+    debug_snapshot!(autobatch_from(true, Some("id"), [doc_imp(ReplaceDocuments, true, None), doc_imp(ReplaceDocuments, true, Some("id")), doc_imp(ReplaceDocuments, true, None)]), @r###"Some((DocumentOperation { method: ReplaceDocuments, allow_index_creation: true, primary_key: Some("id"), operation_ids: [0, 1] }, true))"###);
+    debug_snapshot!(autobatch_from(true, Some("id"), [doc_imp(ReplaceDocuments, true, None), doc_imp(ReplaceDocuments, true, Some("other"))]), @"Some((DocumentOperation { method: ReplaceDocuments, allow_index_creation: true, primary_key: None, operation_ids: [0] }, true))");
+    debug_snapshot!(autobatch_from(true, Some("id"), [doc_imp(ReplaceDocuments, true, None), doc_imp(ReplaceDocuments, true, Some("other")), doc_imp(ReplaceDocuments, true, None)]), @"Some((DocumentOperation { method: ReplaceDocuments, allow_index_creation: true, primary_key: None, operation_ids: [0] }, true))");
+    debug_snapshot!(autobatch_from(true, Some("id"), [doc_imp(ReplaceDocuments, true, None), doc_imp(ReplaceDocuments, true, Some("other")), doc_imp(ReplaceDocuments, true, Some("id"))]), @"Some((DocumentOperation { method: ReplaceDocuments, allow_index_creation: true, primary_key: None, operation_ids: [0] }, true))");

-    debug_snapshot!(autobatch_from(true, Some("id"), [doc_imp(ReplaceDocuments, true, Some("id")), doc_imp(ReplaceDocuments, true, None)]), @r###"Some((DocumentOperation { allow_index_creation: true, primary_key: Some("id"), operation_ids: [0, 1] }, true, None))"###);
-    debug_snapshot!(autobatch_from(true, Some("id"), [doc_imp(ReplaceDocuments, true, Some("id")), doc_imp(ReplaceDocuments, true, Some("id"))]), @r###"Some((DocumentOperation { allow_index_creation: true, primary_key: Some("id"), operation_ids: [0, 1] }, true, None))"###);
-    debug_snapshot!(autobatch_from(true, Some("id"), [doc_imp(ReplaceDocuments, true, Some("id")), doc_imp(ReplaceDocuments, true, Some("id")), doc_imp(ReplaceDocuments, true, None)]), @r###"Some((DocumentOperation { allow_index_creation: true, primary_key: Some("id"), operation_ids: [0, 1, 2] }, true, None))"###);
-    debug_snapshot!(autobatch_from(true, Some("id"), [doc_imp(ReplaceDocuments, true, Some("id")), doc_imp(ReplaceDocuments, true, Some("other"))]), @r###"Some((DocumentOperation { allow_index_creation: true, primary_key: Some("id"), operation_ids: [0] }, true, Some(PrimaryKeyMismatch { id: 1, reason: TaskPrimaryKeyDifferFromIndexPrimaryKey { task_pk: "other", index_pk: "id" } })))"###);
-    debug_snapshot!(autobatch_from(true, Some("id"), [doc_imp(ReplaceDocuments, true, Some("id")), doc_imp(ReplaceDocuments, true, Some("other")), doc_imp(ReplaceDocuments, true, None)]), @r###"Some((DocumentOperation { allow_index_creation: true, primary_key: Some("id"), operation_ids: [0] }, true, Some(PrimaryKeyMismatch { id: 1, reason: TaskPrimaryKeyDifferFromIndexPrimaryKey { task_pk: "other", index_pk: "id" } })))"###);
-    debug_snapshot!(autobatch_from(true, Some("id"), [doc_imp(ReplaceDocuments, true, Some("id")), doc_imp(ReplaceDocuments, true, Some("other")), doc_imp(ReplaceDocuments, true, Some("id"))]), @r###"Some((DocumentOperation { allow_index_creation: true, primary_key: Some("id"), operation_ids: [0] }, true, Some(PrimaryKeyMismatch { id: 1, reason: TaskPrimaryKeyDifferFromIndexPrimaryKey { task_pk: "other", index_pk: "id" } })))"###);
+    debug_snapshot!(autobatch_from(true, Some("id"), [doc_imp(ReplaceDocuments, true, Some("id")), doc_imp(ReplaceDocuments, true, None)]), @r###"Some((DocumentOperation { method: ReplaceDocuments, allow_index_creation: true, primary_key: Some("id"), operation_ids: [0] }, true))"###);
+    debug_snapshot!(autobatch_from(true, Some("id"), [doc_imp(ReplaceDocuments, true, Some("id")), doc_imp(ReplaceDocuments, true, Some("id"))]), @r###"Some((DocumentOperation { method: ReplaceDocuments, allow_index_creation: true, primary_key: Some("id"), operation_ids: [0, 1] }, true))"###);
+    debug_snapshot!(autobatch_from(true, Some("id"), [doc_imp(ReplaceDocuments, true, Some("id")), doc_imp(ReplaceDocuments, true, Some("id")), doc_imp(ReplaceDocuments, true, None)]), @r###"Some((DocumentOperation { method: ReplaceDocuments, allow_index_creation: true, primary_key: Some("id"), operation_ids: [0, 1] }, true))"###);
+    debug_snapshot!(autobatch_from(true, Some("id"), [doc_imp(ReplaceDocuments, true, Some("id")), doc_imp(ReplaceDocuments, true, Some("other"))]), @r###"Some((DocumentOperation { method: ReplaceDocuments, allow_index_creation: true, primary_key: Some("id"), operation_ids: [0] }, true))"###);
+    debug_snapshot!(autobatch_from(true, Some("id"), [doc_imp(ReplaceDocuments, true, Some("id")), doc_imp(ReplaceDocuments, true, Some("other")), doc_imp(ReplaceDocuments, true, None)]), @r###"Some((DocumentOperation { method: ReplaceDocuments, allow_index_creation: true, primary_key: Some("id"), operation_ids: [0] }, true))"###);
+    debug_snapshot!(autobatch_from(true, Some("id"), [doc_imp(ReplaceDocuments, true, Some("id")), doc_imp(ReplaceDocuments, true, Some("other")), doc_imp(ReplaceDocuments, true, Some("id"))]), @r###"Some((DocumentOperation { method: ReplaceDocuments, allow_index_creation: true, primary_key: Some("id"), operation_ids: [0] }, true))"###);

-    debug_snapshot!(autobatch_from(true, Some("id"), [doc_imp(ReplaceDocuments, true, Some("other")), doc_imp(ReplaceDocuments, true, None)]), @r###"Some((DocumentOperation { allow_index_creation: true, primary_key: Some("other"), operation_ids: [0] }, true, Some(PrimaryKeyIndexMismatch { id: 0, in_index: "id", in_task: "other" })))"###);
-    debug_snapshot!(autobatch_from(true, Some("id"), [doc_imp(ReplaceDocuments, true, Some("other")), doc_imp(ReplaceDocuments, true, Some("id"))]), @r###"Some((DocumentOperation { allow_index_creation: true, primary_key: Some("other"), operation_ids: [0] }, true, Some(PrimaryKeyIndexMismatch { id: 0, in_index: "id", in_task: "other" })))"###);
-    debug_snapshot!(autobatch_from(true, Some("id"), [doc_imp(ReplaceDocuments, true, Some("other")), doc_imp(ReplaceDocuments, true, Some("id")), doc_imp(ReplaceDocuments, true, None)]), @r###"Some((DocumentOperation { allow_index_creation: true, primary_key: Some("other"), operation_ids: [0] }, true, Some(PrimaryKeyIndexMismatch { id: 0, in_index: "id", in_task: "other" })))"###);
-    debug_snapshot!(autobatch_from(true, Some("id"), [doc_imp(ReplaceDocuments, true, Some("other")), doc_imp(ReplaceDocuments, true, Some("other"))]), @r###"Some((DocumentOperation { allow_index_creation: true, primary_key: Some("other"), operation_ids: [0] }, true, Some(PrimaryKeyIndexMismatch { id: 0, in_index: "id", in_task: "other" })))"###);
-    debug_snapshot!(autobatch_from(true, Some("id"), [doc_imp(ReplaceDocuments, true, Some("other")), doc_imp(ReplaceDocuments, true, Some("other")), doc_imp(ReplaceDocuments, true, None)]), @r###"Some((DocumentOperation { allow_index_creation: true, primary_key: Some("other"), operation_ids: [0] }, true, Some(PrimaryKeyIndexMismatch { id: 0, in_index: "id", in_task: "other" })))"###);
-    debug_snapshot!(autobatch_from(true, Some("id"), [doc_imp(ReplaceDocuments, true, Some("other")), doc_imp(ReplaceDocuments, true, Some("other")), doc_imp(ReplaceDocuments, true, Some("id"))]), @r###"Some((DocumentOperation { allow_index_creation: true, primary_key: Some("other"), operation_ids: [0] }, true, Some(PrimaryKeyIndexMismatch { id: 0, in_index: "id", in_task: "other" })))"###);
+    debug_snapshot!(autobatch_from(true, Some("id"), [doc_imp(ReplaceDocuments, true, Some("other")), doc_imp(ReplaceDocuments, true, None)]), @r###"Some((DocumentOperation { method: ReplaceDocuments, allow_index_creation: true, primary_key: Some("other"), operation_ids: [0] }, true))"###);
+    debug_snapshot!(autobatch_from(true, Some("id"), [doc_imp(ReplaceDocuments, true, Some("other")), doc_imp(ReplaceDocuments, true, Some("id"))]), @r###"Some((DocumentOperation { method: ReplaceDocuments, allow_index_creation: true, primary_key: Some("other"), operation_ids: [0] }, true))"###);
+    debug_snapshot!(autobatch_from(true, Some("id"), [doc_imp(ReplaceDocuments, true, Some("other")), doc_imp(ReplaceDocuments, true, Some("id")), doc_imp(ReplaceDocuments, true, None)]), @r###"Some((DocumentOperation { method: ReplaceDocuments, allow_index_creation: true, primary_key: Some("other"), operation_ids: [0] }, true))"###);
+    debug_snapshot!(autobatch_from(true, Some("id"), [doc_imp(ReplaceDocuments, true, Some("other")), doc_imp(ReplaceDocuments, true, Some("other"))]), @r###"Some((DocumentOperation { method: ReplaceDocuments, allow_index_creation: true, primary_key: Some("other"), operation_ids: [0] }, true))"###);
+    debug_snapshot!(autobatch_from(true, Some("id"), [doc_imp(ReplaceDocuments, true, Some("other")), doc_imp(ReplaceDocuments, true, Some("other")), doc_imp(ReplaceDocuments, true, None)]), @r###"Some((DocumentOperation { method: ReplaceDocuments, allow_index_creation: true, primary_key: Some("other"), operation_ids: [0] }, true))"###);
+    debug_snapshot!(autobatch_from(true, Some("id"), [doc_imp(ReplaceDocuments, true, Some("other")), doc_imp(ReplaceDocuments, true, Some("other")), doc_imp(ReplaceDocuments, true, Some("id"))]), @r###"Some((DocumentOperation { method: ReplaceDocuments, allow_index_creation: true, primary_key: Some("other"), operation_ids: [0] }, true))"###);

     // ==> If I don't have a pk
     // With a single update
-    debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, true, None)]), @"Some((DocumentOperation { allow_index_creation: true, primary_key: None, operation_ids: [0] }, true, None))");
-    debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, true, Some("id"))]), @r###"Some((DocumentOperation { allow_index_creation: true, primary_key: Some("id"), operation_ids: [0] }, true, None))"###);
-    debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, true, Some("other"))]), @r###"Some((DocumentOperation { allow_index_creation: true, primary_key: Some("other"), operation_ids: [0] }, true, None))"###);
+    debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, true, None)]), @"Some((DocumentOperation { method: ReplaceDocuments, allow_index_creation: true, primary_key: None, operation_ids: [0] }, true))");
+    debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, true, Some("id"))]), @r###"Some((DocumentOperation { method: ReplaceDocuments, allow_index_creation: true, primary_key: Some("id"), operation_ids: [0] }, true))"###);
+    debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, true, Some("other"))]), @r###"Some((DocumentOperation { method: ReplaceDocuments, allow_index_creation: true, primary_key: Some("other"), operation_ids: [0] }, true))"###);

     // With a multiple updates
-    debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, true, None), doc_imp(ReplaceDocuments, true, None)]), @"Some((DocumentOperation { allow_index_creation: true, primary_key: None, operation_ids: [0, 1] }, true, None))");
-    debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, true, None), doc_imp(ReplaceDocuments, true, Some("id"))]), @r###"Some((DocumentOperation { allow_index_creation: true, primary_key: None, operation_ids: [0] }, true, Some(PrimaryKeyMismatch { id: 1, reason: CannotInterfereWithPrimaryKeyGuessing { task_pk: "id" } })))"###);
-    debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, true, Some("id")), doc_imp(ReplaceDocuments, true, None)]), @r###"Some((DocumentOperation { allow_index_creation: true, primary_key: Some("id"), operation_ids: [0, 1] }, true, None))"###);
+    debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, true, None), doc_imp(ReplaceDocuments, true, None)]), @"Some((DocumentOperation { method: ReplaceDocuments, allow_index_creation: true, primary_key: None, operation_ids: [0, 1] }, true))");
+    debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, true, None), doc_imp(ReplaceDocuments, true, Some("id"))]), @"Some((DocumentOperation { method: ReplaceDocuments, allow_index_creation: true, primary_key: None, operation_ids: [0] }, true))");
+    debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, true, Some("id")), doc_imp(ReplaceDocuments, true, None)]), @r###"Some((DocumentOperation { method: ReplaceDocuments, allow_index_creation: true, primary_key: Some("id"), operation_ids: [0] }, true))"###);
 }

@@ -3,7 +3,7 @@ use std::fmt;
 use meilisearch_types::heed::RoTxn;
 use meilisearch_types::milli::update::IndexDocumentsMethod;
 use meilisearch_types::settings::{Settings, Unchecked};
-use meilisearch_types::tasks::{BatchStopReason, Kind, KindWithContent, Status, Task};
+use meilisearch_types::tasks::{Kind, KindWithContent, Status, Task};
 use roaring::RoaringBitmap;
 use uuid::Uuid;

@@ -54,8 +54,7 @@ pub(crate) enum Batch {

 #[derive(Debug)]
 pub(crate) enum DocumentOperation {
-    Replace(Uuid),
-    Update(Uuid),
+    Add(Uuid),
     Delete(Vec<String>),
 }

@@ -65,6 +64,7 @@ pub(crate) enum IndexOperation {
     DocumentOperation {
         index_uid: String,
         primary_key: Option<String>,
+        method: IndexDocumentsMethod,
         operations: Vec<DocumentOperation>,
         tasks: Vec<Task>,
     },
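Taken together, the hunks above move the replace-versus-update distinction: on the `-` side each payload is its own `Replace(Uuid)`/`Update(Uuid)` variant, while on the `+` side the payload is a method-agnostic `Add(Uuid)` and the whole `IndexOperation::DocumentOperation` shares one `method: IndexDocumentsMethod`. A self-contained sketch of the two shapes, with stand-in types (`Uuid` below is a placeholder, not the `uuid` crate):

```rust
// Stand-ins so the sketch compiles on its own; the real code uses
// `uuid::Uuid` and `meilisearch_types::milli::update::IndexDocumentsMethod`.
type Uuid = u128;

#[derive(Clone, Copy)]
enum IndexDocumentsMethod {
    ReplaceDocuments,
    UpdateDocuments,
}

// `-` side: each operation carries its own merge method as a variant.
enum DocumentOperationPerOp {
    Replace(Uuid),
    Update(Uuid),
    Delete(Vec<String>),
}

// `+` side: operations are method-agnostic and the batch owns the method.
enum DocumentOperationShared {
    Add(Uuid),
    Delete(Vec<String>),
}

struct DocumentOperationBatch {
    method: IndexDocumentsMethod, // one method for every `Add` in the batch
    operations: Vec<DocumentOperationShared>,
}
```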
@@ -254,7 +254,7 @@ impl IndexScheduler {
                     _ => unreachable!(),
                 }
             }
-            BatchKind::DocumentOperation { operation_ids, .. } => {
+            BatchKind::DocumentOperation { method, operation_ids, .. } => {
                 let tasks = self.queue.get_existing_tasks_for_processing_batch(
                     rtxn,
                     current_batch,
@@ -276,17 +276,9 @@ impl IndexScheduler {

                 for task in tasks.iter() {
                     match task.kind {
-                        KindWithContent::DocumentAdditionOrUpdate {
-                            content_file, method, ..
-                        } => match method {
-                            IndexDocumentsMethod::ReplaceDocuments => {
-                                operations.push(DocumentOperation::Replace(content_file))
-                            }
-                            IndexDocumentsMethod::UpdateDocuments => {
-                                operations.push(DocumentOperation::Update(content_file))
-                            }
-                            _ => unreachable!("Unknown document merging method"),
-                        },
+                        KindWithContent::DocumentAdditionOrUpdate { content_file, .. } => {
+                            operations.push(DocumentOperation::Add(content_file));
+                        }
                         KindWithContent::DocumentDeletion { ref documents_ids, .. } => {
                             operations.push(DocumentOperation::Delete(documents_ids.clone()));
                         }
@@ -298,6 +290,7 @@ impl IndexScheduler {
                     op: IndexOperation::DocumentOperation {
                         index_uid,
                         primary_key,
+                        method,
                         operations,
                         tasks,
                     },
@@ -423,8 +416,7 @@ impl IndexScheduler {
     }

     /// Create the next batch to be processed;
-    /// 0. We get the *last* task to cancel.
-    /// 1. We get the tasks to upgrade.
+    /// 1. We get the *last* task to cancel.
     /// 2. We get the *next* task to delete.
     /// 3. We get the *next* snapshot to process.
     /// 4. We get the *next* dump to process.
@@ -441,23 +433,9 @@ impl IndexScheduler {
         let mut current_batch = ProcessingBatch::new(batch_id);

         let enqueued = &self.queue.tasks.get_status(rtxn, Status::Enqueued)?;
-        let count_total_enqueued = enqueued.len();
         let failed = &self.queue.tasks.get_status(rtxn, Status::Failed)?;

-        // 0. we get the last task to cancel.
-        let to_cancel = self.queue.tasks.get_kind(rtxn, Kind::TaskCancelation)? & enqueued;
-        if let Some(task_id) = to_cancel.max() {
-            let mut task =
-                self.queue.tasks.get_task(rtxn, task_id)?.ok_or(Error::CorruptedTaskQueue)?;
-            current_batch.processing(Some(&mut task));
-            current_batch.reason(BatchStopReason::TaskCannotBeBatched {
-                kind: Kind::TaskCancelation,
-                id: task_id,
-            });
-            return Ok(Some((Batch::TaskCancelation { task }, current_batch)));
-        }
-
-        // 1. We upgrade the instance
+        // 0. The priority over everything is to upgrade the instance
         // There shouldn't be multiple upgrade tasks but just in case we're going to batch all of them at the same time
         let upgrade = self.queue.tasks.get_kind(rtxn, Kind::UpgradeDatabase)? & (enqueued | failed);
         if !upgrade.is_empty() {
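The hunk above reorders the head of the batch builder: the `-` side drains the last enqueued cancelation before checking for an `UpgradeDatabase` task, while the `+` side makes the upgrade check step 0 and (in the next hunk) moves cancelation after it. A sketch of the two orderings, with string labels mirroring the `Kind` variants named in the hunks (the function itself is hypothetical):

```rust
// Hypothetical summary of the early-return priority this diff flips; each
// label corresponds to a `return Ok(Some((Batch::..., current_batch)))` site.
fn batch_priority(upgrade_first: bool) -> Vec<&'static str> {
    if upgrade_first {
        // `+` side: an enqueued (or previously failed) upgrade preempts everything.
        vec!["UpgradeDatabase", "TaskCancelation", "TaskDeletion", "SnapshotCreation", "Dump"]
    } else {
        // `-` side: the last enqueued cancelation is handled first.
        vec!["TaskCancelation", "UpgradeDatabase", "TaskDeletion", "SnapshotCreation", "Dump"]
    }
}
```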
@@ -468,26 +446,16 @@ impl IndexScheduler {
                 current_batch.uid = batch_uid;
             }
             current_batch.processing(&mut tasks);
-            current_batch
-                .reason(BatchStopReason::TaskKindCannotBeBatched { kind: Kind::UpgradeDatabase });
             return Ok(Some((Batch::UpgradeDatabase { tasks }, current_batch)));
         }

-        // check the version of the scheduler here.
-        // if the version is not the current, refuse to batch any additional task.
-        let version = self.version.get_version(rtxn)?;
-        let package_version = (
-            meilisearch_types::versioning::VERSION_MAJOR,
-            meilisearch_types::versioning::VERSION_MINOR,
-            meilisearch_types::versioning::VERSION_PATCH,
-        );
-        if version != Some(package_version) {
-            return Err(Error::UnrecoverableError(Box::new(
-                Error::IndexSchedulerVersionMismatch {
-                    index_scheduler_version: version.unwrap_or((1, 12, 0)),
-                    package_version,
-                },
-            )));
+        // 1. we get the last task to cancel.
+        let to_cancel = self.queue.tasks.get_kind(rtxn, Kind::TaskCancelation)? & enqueued;
+        if let Some(task_id) = to_cancel.max() {
+            let mut task =
+                self.queue.tasks.get_task(rtxn, task_id)?.ok_or(Error::CorruptedTaskQueue)?;
+            current_batch.processing(Some(&mut task));
+            return Ok(Some((Batch::TaskCancelation { task }, current_batch)));
         }

         // 2. we get the next task to delete
@@ -495,8 +463,6 @@ impl IndexScheduler {
         if !to_delete.is_empty() {
             let mut tasks = self.queue.tasks.get_existing_tasks(rtxn, to_delete)?;
             current_batch.processing(&mut tasks);
-            current_batch
-                .reason(BatchStopReason::TaskKindCannotBeBatched { kind: Kind::TaskDeletion });
             return Ok(Some((Batch::TaskDeletions(tasks), current_batch)));
         }

@@ -505,8 +471,6 @@ impl IndexScheduler {
         if !to_snapshot.is_empty() {
             let mut tasks = self.queue.tasks.get_existing_tasks(rtxn, to_snapshot)?;
             current_batch.processing(&mut tasks);
-            current_batch
-                .reason(BatchStopReason::TaskKindCannotBeBatched { kind: Kind::SnapshotCreation });
             return Ok(Some((Batch::SnapshotCreation(tasks), current_batch)));
         }

@@ -516,10 +480,6 @@ impl IndexScheduler {
             let mut task =
                 self.queue.tasks.get_task(rtxn, to_dump)?.ok_or(Error::CorruptedTaskQueue)?;
             current_batch.processing(Some(&mut task));
-            current_batch.reason(BatchStopReason::TaskCannotBeBatched {
-                kind: Kind::DumpCreation,
-                id: task.uid,
-            });
             return Ok(Some((Batch::Dump(task), current_batch)));
         }

@@ -537,10 +497,6 @@ impl IndexScheduler {
         } else {
             assert!(matches!(&task.kind, KindWithContent::IndexSwap { swaps } if swaps.is_empty()));
             current_batch.processing(Some(&mut task));
-            current_batch.reason(BatchStopReason::TaskCannotBeBatched {
-                kind: Kind::IndexSwap,
-                id: task.uid,
-            });
             return Ok(Some((Batch::IndexSwap { task }, current_batch)));
         };

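The last four hunks all delete the same kind of line: on the `-` side every early-return site tags `current_batch` with a `BatchStopReason` before returning, and the `+` side drops that bookkeeping. A minimal sketch of the pattern being removed (the enum body below is invented for illustration; the real `BatchStopReason` lives in `meilisearch_types::tasks`):

```rust
// Minimal sketch of the stop-reason tagging the `-` side performs at each
// early return; variant fields are illustrative, not the real definitions.
#[derive(Debug, Default)]
enum BatchStopReason {
    #[default]
    Unspecified,
    TaskKindCannotBeBatched { kind: &'static str },
}

#[derive(Debug, Default)]
struct ProcessingBatch {
    stop_reason: BatchStopReason,
}

impl ProcessingBatch {
    // Mirrors the `current_batch.reason(...)` calls deleted in the hunks above.
    fn reason(&mut self, reason: BatchStopReason) {
        self.stop_reason = reason;
    }
}

fn main() {
    let mut batch = ProcessingBatch::default();
    batch.reason(BatchStopReason::TaskKindCannotBeBatched { kind: "TaskDeletion" });
    println!("{batch:?}");
}
```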
@@ -562,14 +518,9 @@ impl IndexScheduler {
             1
         };

-        let mut stop_reason = BatchStopReason::default();
         let mut enqueued = Vec::new();
         let mut total_size: u64 = 0;
-        for task_id in index_tasks.into_iter() {
-            if enqueued.len() >= tasks_limit {
-                stop_reason = BatchStopReason::ReachedTaskLimit { task_limit: tasks_limit };
-                break;
-            }
+        for task_id in index_tasks.into_iter().take(tasks_limit) {
             let task = self
                 .queue
                 .tasks
@@ -581,27 +532,16 @@ impl IndexScheduler {
                 total_size = total_size.saturating_add(content_size);
             }

-            let size_limit = self.scheduler.batched_tasks_size_limit;
-            if total_size > size_limit && !enqueued.is_empty() {
-                stop_reason = BatchStopReason::ReachedSizeLimit { size_limit, size: total_size };
+            if total_size > self.scheduler.batched_tasks_size_limit && !enqueued.is_empty() {
                 break;
             }

             enqueued.push((task.uid, task.kind));
         }

-        stop_reason.replace_unspecified({
-            if enqueued.len() == count_total_enqueued as usize {
-                BatchStopReason::ExhaustedEnqueuedTasks
-            } else {
-                BatchStopReason::ExhaustedEnqueuedTasksForIndex { index: index_name.to_owned() }
-            }
-        });
-
-        if let Some((batchkind, create_index, autobatch_stop_reason)) =
+        if let Some((batchkind, create_index)) =
             autobatcher::autobatch(enqueued, index_already_exists, primary_key.as_deref())
         {
-            current_batch.reason(autobatch_stop_reason.unwrap_or(stop_reason));
             return Ok(self
                 .create_next_batch_index(
                     rtxn,

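The two hunks above drop the explicit task-limit counter and the stop-reason bookkeeping in favor of `Iterator::take`, which enforces the same limit declaratively. A self-contained Rust sketch of the equivalence, with made-up task ids (nothing here is the crate's real code):

fn main() {
    let index_tasks = vec![10u32, 11, 12, 13, 14];
    let tasks_limit = 3;

    // Before: manual limit check inside the loop (plus a stop reason in the real code).
    let mut enqueued_before = Vec::new();
    for task_id in index_tasks.iter() {
        if enqueued_before.len() >= tasks_limit {
            break;
        }
        enqueued_before.push(*task_id);
    }

    // After: the limit is part of the iterator chain.
    let enqueued_after: Vec<u32> = index_tasks.into_iter().take(tasks_limit).collect();

    assert_eq!(enqueued_before, enqueued_after);
}
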
@@ -20,12 +20,9 @@ use std::path::PathBuf;
 use std::sync::atomic::{AtomicBool, AtomicU32, Ordering};
 use std::sync::Arc;

-use convert_case::{Case, Casing as _};
 use meilisearch_types::error::ResponseError;
-use meilisearch_types::heed::{Env, WithoutTls};
 use meilisearch_types::milli;
 use meilisearch_types::tasks::Status;
-use process_batch::ProcessBatchInfo;
 use rayon::current_num_threads;
 use rayon::iter::{IntoParallelIterator, ParallelIterator};
 use roaring::RoaringBitmap;
@@ -74,18 +71,10 @@ pub struct Scheduler {
     pub(crate) snapshots_path: PathBuf,

     /// The path to the folder containing the auth LMDB env.
-    pub(crate) auth_env: Env<WithoutTls>,
+    pub(crate) auth_path: PathBuf,

     /// The path to the version file of Meilisearch.
     pub(crate) version_file_path: PathBuf,
-
-    /// The maximal number of entries in the search query cache of an embedder.
-    ///
-    /// 0 disables the cache.
-    pub(crate) embedding_cache_cap: usize,
-
-    /// Snapshot compaction status.
-    pub(crate) experimental_no_snapshot_compaction: bool,
 }

 impl Scheduler {
@@ -98,14 +87,12 @@ impl Scheduler {
             batched_tasks_size_limit: self.batched_tasks_size_limit,
             dumps_path: self.dumps_path.clone(),
             snapshots_path: self.snapshots_path.clone(),
-            auth_env: self.auth_env.clone(),
+            auth_path: self.auth_path.clone(),
             version_file_path: self.version_file_path.clone(),
-            embedding_cache_cap: self.embedding_cache_cap,
-            experimental_no_snapshot_compaction: self.experimental_no_snapshot_compaction,
         }
     }

-    pub fn new(options: &IndexSchedulerOptions, auth_env: Env<WithoutTls>) -> Scheduler {
+    pub fn new(options: &IndexSchedulerOptions) -> Scheduler {
         Scheduler {
             must_stop_processing: MustStopProcessing::default(),
             // we want to start the loop right away in case meilisearch was ctrl+Ced while processing things
@@ -115,10 +102,8 @@ impl Scheduler {
             batched_tasks_size_limit: options.batched_tasks_size_limit,
             dumps_path: options.dumps_path.clone(),
             snapshots_path: options.snapshots_path.clone(),
-            auth_env,
+            auth_path: options.auth_path.clone(),
             version_file_path: options.version_file_path.clone(),
-            embedding_cache_cap: options.embedding_cache_cap,
-            experimental_no_snapshot_compaction: options.experimental_no_snapshot_compaction,
         }
     }
 }
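The struct and constructor hunks above replace the owned auth LMDB handle (`auth_env`) with a stored path (`auth_path`): callers no longer open the env and hand it to `Scheduler::new`. A standalone Rust sketch of that ownership pattern, with illustrative names only (`SchedulerSketch` is not a real type in this repo, and the env open/copy is stubbed with plain filesystem calls):

use std::path::{Path, PathBuf};

struct SchedulerSketch {
    auth_path: PathBuf, // was: an open LMDB `Env` handle held for the scheduler's lifetime
}

impl SchedulerSketch {
    fn new(auth_path: &Path) -> Self {
        Self { auth_path: auth_path.to_path_buf() }
    }

    // The env is only opened where it is actually needed (e.g. snapshot creation).
    fn snapshot_auth(&self) -> std::io::Result<u64> {
        std::fs::metadata(self.auth_path.join("data.mdb")).map(|m| m.len())
    }
}

fn main() {
    let sched = SchedulerSketch::new(Path::new("/tmp/meili-auth"));
    match sched.snapshot_auth() {
        Ok(size) => println!("auth db is {size} bytes"),
        Err(e) => println!("auth db not present: {e}"),
    }
}
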
@@ -230,16 +215,14 @@ impl IndexScheduler {
         let mut stop_scheduler_forever = false;
         let mut wtxn = self.env.write_txn().map_err(Error::HeedTransaction)?;
         let mut canceled = RoaringBitmap::new();
-        let mut process_batch_info = ProcessBatchInfo::default();

         match res {
-            Ok((tasks, info)) => {
+            Ok(tasks) => {
                 #[cfg(test)]
                 self.breakpoint(crate::test_utils::Breakpoint::ProcessBatchSucceeded);

                 let (task_progress, task_progress_obj) = AtomicTaskStep::new(tasks.len() as u32);
                 progress.update_progress(task_progress_obj);
-                process_batch_info = info;
                 let mut success = 0;
                 let mut failure = 0;
                 let mut canceled_by = None;
@@ -356,60 +339,6 @@ impl IndexScheduler {

         // We must re-add the canceled task so they're part of the same batch.
         ids |= canceled;

-        let ProcessBatchInfo { congestion, pre_commit_dabases_sizes, post_commit_dabases_sizes } =
-            process_batch_info;
-
-        processing_batch.stats.progress_trace =
-            progress.accumulated_durations().into_iter().map(|(k, v)| (k, v.into())).collect();
-        processing_batch.stats.write_channel_congestion = congestion.map(|congestion| {
-            let mut congestion_info = serde_json::Map::new();
-            congestion_info.insert("attempts".into(), congestion.attempts.into());
-            congestion_info.insert("blocking_attempts".into(), congestion.blocking_attempts.into());
-            congestion_info.insert("blocking_ratio".into(), congestion.congestion_ratio().into());
-            congestion_info
-        });
-        processing_batch.stats.internal_database_sizes = pre_commit_dabases_sizes
-            .iter()
-            .flat_map(|(dbname, pre_size)| {
-                post_commit_dabases_sizes
-                    .get(dbname)
-                    .map(|post_size| {
-                        use std::cmp::Ordering::{Equal, Greater, Less};
-
-                        use byte_unit::Byte;
-                        use byte_unit::UnitType::Binary;
-
-                        let post = Byte::from_u64(*post_size as u64).get_appropriate_unit(Binary);
-                        let diff_size = post_size.abs_diff(*pre_size) as u64;
-                        let diff = Byte::from_u64(diff_size).get_appropriate_unit(Binary);
-                        let sign = match post_size.cmp(pre_size) {
-                            Equal => return None,
-                            Greater => "+",
-                            Less => "-",
-                        };
-
-                        Some((
-                            dbname.to_case(Case::Camel),
-                            format!("{post:#.2} ({sign}{diff:#.2})").into(),
-                        ))
-                    })
-                    .into_iter()
-                    .flatten()
-            })
-            .collect();
-
-        if let Some(congestion) = congestion {
-            tracing::debug!(
-                "Channel congestion metrics - Attempts: {}, Blocked attempts: {} ({:.1}% congestion)",
-                congestion.attempts,
-                congestion.blocking_attempts,
-                congestion.congestion_ratio(),
-            );
-        }
-
-        tracing::debug!("call trace: {:?}", progress.accumulated_durations());
-
         self.queue.write_batch(&mut wtxn, processing_batch, &ids)?;

         #[cfg(test)]

@@ -5,15 +5,14 @@ use std::sync::atomic::Ordering;
 use meilisearch_types::batches::{BatchEnqueuedAt, BatchId};
 use meilisearch_types::heed::{RoTxn, RwTxn};
 use meilisearch_types::milli::progress::{Progress, VariableNameStep};
-use meilisearch_types::milli::{self, ChannelCongestion};
-use meilisearch_types::tasks::{Details, IndexSwap, Kind, KindWithContent, Status, Task};
-use meilisearch_types::versioning::{VERSION_MAJOR, VERSION_MINOR, VERSION_PATCH};
+use meilisearch_types::milli::{self};
+use meilisearch_types::tasks::{Details, IndexSwap, KindWithContent, Status, Task};
 use milli::update::Settings as MilliSettings;
 use roaring::RoaringBitmap;

 use super::create_batch::Batch;
 use crate::processing::{
-    AtomicBatchStep, AtomicTaskStep, CreateIndexProgress, DeleteIndexProgress, FinalizingIndexStep,
+    AtomicBatchStep, AtomicTaskStep, CreateIndexProgress, DeleteIndexProgress,
     InnerSwappingTwoIndexes, SwappingTheIndexes, TaskCancelationProgress, TaskDeletionProgress,
     UpdateIndexProgress,
 };
@@ -23,16 +22,6 @@ use crate::utils::{
 };
 use crate::{Error, IndexScheduler, Result, TaskId};

-#[derive(Debug, Default)]
-pub struct ProcessBatchInfo {
-    /// The write channel congestion. None when unavailable: settings update.
-    pub congestion: Option<ChannelCongestion>,
-    /// The sizes of the different databases before starting the indexation.
-    pub pre_commit_dabases_sizes: indexmap::IndexMap<&'static str, usize>,
-    /// The sizes of the different databases after commiting the indexation.
-    pub post_commit_dabases_sizes: indexmap::IndexMap<&'static str, usize>,
-}
-
 impl IndexScheduler {
     /// Apply the operation associated with the given batch.
     ///
@@ -46,7 +35,7 @@ impl IndexScheduler {
         batch: Batch,
         current_batch: &mut ProcessingBatch,
         progress: Progress,
-    ) -> Result<(Vec<Task>, ProcessBatchInfo)> {
+    ) -> Result<Vec<Task>> {
         #[cfg(test)]
         {
             self.maybe_fail(crate::test_utils::FailureLocation::InsideProcessBatch)?;
@@ -87,7 +76,7 @@ impl IndexScheduler {

                 canceled_tasks.push(task);

-                Ok((canceled_tasks, ProcessBatchInfo::default()))
+                Ok(canceled_tasks)
             }
             Batch::TaskDeletions(mut tasks) => {
                 // 1. Retrieve the tasks that matched the query at enqueue-time.
@@ -126,14 +115,10 @@ impl IndexScheduler {
                         _ => unreachable!(),
                     }
                 }
-                Ok((tasks, ProcessBatchInfo::default()))
+                Ok(tasks)
             }
-            Batch::SnapshotCreation(tasks) => self
-                .process_snapshot(progress, tasks)
-                .map(|tasks| (tasks, ProcessBatchInfo::default())),
-            Batch::Dump(task) => self
-                .process_dump_creation(progress, task)
-                .map(|tasks| (tasks, ProcessBatchInfo::default())),
+            Batch::SnapshotCreation(tasks) => self.process_snapshot(progress, tasks),
+            Batch::Dump(task) => self.process_dump_creation(progress, task),
             Batch::IndexOperation { op, must_create_index } => {
                 let index_uid = op.index_uid().to_string();
                 let index = if must_create_index {
@@ -145,28 +130,14 @@ impl IndexScheduler {
                     self.index_mapper.index(&rtxn, &index_uid)?
                 };

-                let mut index_wtxn = index.write_txn()?;
-
-                let index_version = index.get_version(&index_wtxn)?.unwrap_or((1, 12, 0));
-                let package_version = (VERSION_MAJOR, VERSION_MINOR, VERSION_PATCH);
-                if index_version != package_version {
-                    return Err(Error::IndexVersionMismatch {
-                        index: index_uid,
-                        index_version,
-                        package_version,
-                    });
-                }
-
                 // the index operation can take a long time, so save this handle to make it available to the search for the duration of the tick
                 self.index_mapper
                     .set_currently_updating_index(Some((index_uid.clone(), index.clone())));

-                let pre_commit_dabases_sizes = index.database_sizes(&index_wtxn)?;
-                let (tasks, congestion) =
-                    self.apply_index_operation(&mut index_wtxn, &index, op, &progress)?;
+                let mut index_wtxn = index.write_txn()?;
+                let tasks = self.apply_index_operation(&mut index_wtxn, &index, op, progress)?;

                 {
-                    progress.update_progress(FinalizingIndexStep::Committing);
                     let span = tracing::trace_span!(target: "indexing::scheduler", "commit");
                     let _entered = span.enter();

@@ -177,15 +148,12 @@ impl IndexScheduler {
                 // stats of the index. Since the tasks have already been processed and
                 // this is a non-critical operation. If it fails, we should not fail
                 // the entire batch.
-                let mut post_commit_dabases_sizes = None;
                 let res = || -> Result<()> {
-                    progress.update_progress(FinalizingIndexStep::ComputingStats);
                     let index_rtxn = index.read_txn()?;
                     let stats = crate::index_mapper::IndexStats::new(&index, &index_rtxn)
                         .map_err(|e| Error::from_milli(e, Some(index_uid.to_string())))?;
                     let mut wtxn = self.env.write_txn()?;
                     self.index_mapper.store_stats_of(&mut wtxn, &index_uid, &stats)?;
-                    post_commit_dabases_sizes = Some(index.database_sizes(&index_rtxn)?);
                     wtxn.commit()?;
                     Ok(())
                 }();
@@ -198,16 +166,7 @@ impl IndexScheduler {
                     ),
                 }

-                let info = ProcessBatchInfo {
-                    congestion,
-                    // In case we fail to the get post-commit sizes we decide
-                    // that nothing changed and use the pre-commit sizes.
-                    post_commit_dabases_sizes: post_commit_dabases_sizes
-                        .unwrap_or_else(|| pre_commit_dabases_sizes.clone()),
-                    pre_commit_dabases_sizes,
-                };
-
-                Ok((tasks, info))
+                Ok(tasks)
             }
             Batch::IndexCreation { index_uid, primary_key, task } => {
                 progress.update_progress(CreateIndexProgress::CreatingTheIndex);
@@ -275,7 +234,7 @@ impl IndexScheduler {
                     ),
                 }

-                Ok((vec![task], ProcessBatchInfo::default()))
+                Ok(vec![task])
             }
             Batch::IndexDeletion { index_uid, index_has_been_created, mut tasks } => {
                 progress.update_progress(DeleteIndexProgress::DeletingTheIndex);
@@ -309,9 +268,7 @@ impl IndexScheduler {
                     };
                 }

-                // Here we could also show that all the internal database sizes goes to 0
-                // but it would mean opening the index and that's costly.
-                Ok((tasks, ProcessBatchInfo::default()))
+                Ok(tasks)
             }
             Batch::IndexSwap { mut task } => {
                 progress.update_progress(SwappingTheIndexes::EnsuringCorrectnessOfTheSwap);
@@ -359,17 +316,15 @@ impl IndexScheduler {
                 }
                 wtxn.commit()?;
                 task.status = Status::Succeeded;
-                Ok((vec![task], ProcessBatchInfo::default()))
+                Ok(vec![task])
             }
             Batch::UpgradeDatabase { mut tasks } => {
                 let KindWithContent::UpgradeDatabase { from } = tasks.last().unwrap().kind else {
                     unreachable!();
                 };

                 let ret = catch_unwind(AssertUnwindSafe(|| self.process_upgrade(from, progress)));
                 match ret {
                     Ok(Ok(())) => (),
-                    Ok(Err(Error::AbortedTask)) => return Err(Error::AbortedTask),
                     Ok(Err(e)) => return Err(Error::DatabaseUpgrade(Box::new(e))),
                     Err(e) => {
                         let msg = match e.downcast_ref::<&'static str>() {
@@ -391,7 +346,7 @@ impl IndexScheduler {
                     task.error = None;
                 }

-                Ok((tasks, ProcessBatchInfo::default()))
+                Ok(tasks)
             }
         }
     }
@@ -667,79 +622,17 @@ impl IndexScheduler {
         progress: &Progress,
     ) -> Result<Vec<Task>> {
         progress.update_progress(TaskCancelationProgress::RetrievingTasks);
-        let mut tasks_to_cancel = RoaringBitmap::new();
-
-        let enqueued_tasks = &self.queue.tasks.get_status(rtxn, Status::Enqueued)?;

-        // 0. Check if any upgrade task was matched.
-        // If so, we cancel all the failed or enqueued upgrade tasks.
-        let upgrade_tasks = &self.queue.tasks.get_kind(rtxn, Kind::UpgradeDatabase)?;
-        let is_canceling_upgrade = !matched_tasks.is_disjoint(upgrade_tasks);
-        if is_canceling_upgrade {
-            let failed_tasks = self.queue.tasks.get_status(rtxn, Status::Failed)?;
-            tasks_to_cancel |= upgrade_tasks & (enqueued_tasks | failed_tasks);
-        }
         // 1. Remove from this list the tasks that we are not allowed to cancel
         //    Notice that only the _enqueued_ ones are cancelable and we should
         //    have already aborted the indexation of the _processing_ ones
-        tasks_to_cancel |= enqueued_tasks & matched_tasks;
+        let cancelable_tasks = self.queue.tasks.get_status(rtxn, Status::Enqueued)?;
+        let tasks_to_cancel = cancelable_tasks & matched_tasks;

-        // 2. If we're canceling an upgrade, attempt the rollback
-        if let Some(latest_upgrade_task) = (&tasks_to_cancel & upgrade_tasks).max() {
-            progress.update_progress(TaskCancelationProgress::CancelingUpgrade);
-
-            let task = self.queue.tasks.get_task(rtxn, latest_upgrade_task)?.unwrap();
-            let Some(Details::UpgradeDatabase { from, to }) = task.details else {
-                unreachable!("wrong details for upgrade task {latest_upgrade_task}")
-            };
-
-            // check that we are rollbacking an upgrade to the current Meilisearch
-            let bin_major: u32 = meilisearch_types::versioning::VERSION_MAJOR;
-            let bin_minor: u32 = meilisearch_types::versioning::VERSION_MINOR;
-            let bin_patch: u32 = meilisearch_types::versioning::VERSION_PATCH;
-
-            if to == (bin_major, bin_minor, bin_patch) {
-                tracing::warn!(
-                    "Rollbacking from v{}.{}.{} to v{}.{}.{}",
-                    to.0,
-                    to.1,
-                    to.2,
-                    from.0,
-                    from.1,
-                    from.2
-                );
-                match std::panic::catch_unwind(std::panic::AssertUnwindSafe(|| {
-                    self.process_rollback(from, progress)
-                })) {
-                    Ok(Ok(())) => {}
-                    Ok(Err(err)) => return Err(Error::DatabaseUpgrade(Box::new(err))),
-                    Err(e) => {
-                        let msg = match e.downcast_ref::<&'static str>() {
-                            Some(s) => *s,
-                            None => match e.downcast_ref::<String>() {
-                                Some(s) => &s[..],
-                                None => "Box<dyn Any>",
-                            },
-                        };
-                        return Err(Error::DatabaseUpgrade(Box::new(Error::ProcessBatchPanicked(
-                            msg.to_string(),
-                        ))));
-                    }
-                }
-            } else {
-                tracing::debug!(
-                    "Not rollbacking an upgrade targetting the earlier version v{}.{}.{}",
-                    bin_major,
-                    bin_minor,
-                    bin_patch
-                )
-            }
-        }
-
-        // 3. We now have a list of tasks to cancel, cancel them
         let (task_progress, progress_obj) = AtomicTaskStep::new(tasks_to_cancel.len() as u32);
         progress.update_progress(progress_obj);

+        // 2. We now have a list of tasks to cancel, cancel them
         let mut tasks = self.queue.tasks.get_existing_tasks(
             rtxn,
             tasks_to_cancel.iter().inspect(|_| {

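The process_batch.rs hunks above change the method's return type from `Result<(Vec<Task>, ProcessBatchInfo)>` back to `Result<Vec<Task>>`, so every match arm returns its tasks directly instead of pairing them with a metrics side channel. A minimal standalone sketch of what that refactor looks like at the signature level (the types below are placeholders, not the crate's real ones):

// Before: every arm had to wrap its tasks in a tuple with the info struct.
#[derive(Debug, Default)]
struct ProcessBatchInfoSketch {
    congestion: Option<f32>,
}

type TaskId = u32;

fn process_batch_before() -> Result<(Vec<TaskId>, ProcessBatchInfoSketch), String> {
    Ok((vec![1, 2], ProcessBatchInfoSketch::default()))
}

// After: the side channel is gone, so the signature is just the tasks.
fn process_batch_after() -> Result<Vec<TaskId>, String> {
    Ok(vec![1, 2])
}

fn main() {
    let (tasks, _info) = process_batch_before().unwrap();
    assert_eq!(tasks, process_batch_after().unwrap());
}
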
@@ -5,7 +5,7 @@ use meilisearch_types::milli::documents::PrimaryKey;
 use meilisearch_types::milli::progress::Progress;
 use meilisearch_types::milli::update::new::indexer::{self, UpdateByFunction};
 use meilisearch_types::milli::update::DocumentAdditionResult;
-use meilisearch_types::milli::{self, ChannelCongestion, Filter, ThreadPoolNoAbortBuilder};
+use meilisearch_types::milli::{self, Filter, ThreadPoolNoAbortBuilder};
 use meilisearch_types::settings::apply_settings_to_builder;
 use meilisearch_types::tasks::{Details, KindWithContent, Status, Task};
 use meilisearch_types::Index;
@@ -32,9 +32,10 @@ impl IndexScheduler {
         index_wtxn: &mut RwTxn<'i>,
         index: &'i Index,
         operation: IndexOperation,
-        progress: &Progress,
-    ) -> Result<(Vec<Task>, Option<ChannelCongestion>)> {
+        progress: Progress,
+    ) -> Result<Vec<Task>> {
         let indexer_alloc = Bump::new();

         let started_processing_at = std::time::Instant::now();
         let must_stop_processing = self.scheduler.must_stop_processing.clone();
@@ -59,23 +60,25 @@ impl IndexScheduler {
                     };
                 }

-                Ok((tasks, None))
+                Ok(tasks)
             }
-            IndexOperation::DocumentOperation { index_uid, primary_key, operations, mut tasks } => {
+            IndexOperation::DocumentOperation {
+                index_uid,
+                primary_key,
+                method,
+                operations,
+                mut tasks,
+            } => {
                 progress.update_progress(DocumentOperationProgress::RetrievingConfig);
                 // TODO: at some point, for better efficiency we might want to reuse the bumpalo for successive batches.
                 // this is made difficult by the fact we're doing private clones of the index scheduler and sending it
                 // to a fresh thread.
                 let mut content_files = Vec::new();
                 for operation in &operations {
-                    match operation {
-                        DocumentOperation::Replace(content_uuid)
-                        | DocumentOperation::Update(content_uuid) => {
-                            let content_file = self.queue.file_store.get_update(*content_uuid)?;
-                            let mmap = unsafe { memmap2::Mmap::map(&content_file)? };
-                            content_files.push(mmap);
-                        }
-                        _ => (),
-                    }
+                    if let DocumentOperation::Add(content_uuid) = operation {
+                        let content_file = self.queue.file_store.get_update(*content_uuid)?;
+                        let mmap = unsafe { memmap2::Mmap::map(&content_file)? };
+                        content_files.push(mmap);
+                    }
                 }

@@ -84,23 +87,17 @@ impl IndexScheduler {
                 let mut new_fields_ids_map = db_fields_ids_map.clone();

                 let mut content_files_iter = content_files.iter();
-                let mut indexer = indexer::DocumentOperation::new();
+                let mut indexer = indexer::DocumentOperation::new(method);
                 let embedders = index
                     .embedding_configs(index_wtxn)
                     .map_err(|e| Error::from_milli(e, Some(index_uid.clone())))?;
                 let embedders = self.embedders(index_uid.clone(), embedders)?;
                 for operation in operations {
                     match operation {
-                        DocumentOperation::Replace(_content_uuid) => {
-                            let mmap = content_files_iter.next().unwrap();
-                            indexer
-                                .replace_documents(mmap)
-                                .map_err(|e| Error::from_milli(e, Some(index_uid.clone())))?;
-                        }
-                        DocumentOperation::Update(_content_uuid) => {
+                        DocumentOperation::Add(_content_uuid) => {
                             let mmap = content_files_iter.next().unwrap();
                             indexer
-                                .update_documents(mmap)
+                                .add_documents(mmap)
                                 .map_err(|e| Error::from_milli(e, Some(index_uid.clone())))?;
                         }
                         DocumentOperation::Delete(document_ids) => {
@@ -172,24 +169,21 @@ impl IndexScheduler {
                 }

                 progress.update_progress(DocumentOperationProgress::Indexing);
-                let mut congestion = None;
                 if tasks.iter().any(|res| res.error.is_none()) {
-                    congestion = Some(
-                        indexer::index(
-                            index_wtxn,
-                            index,
-                            pool,
-                            indexer_config.grenad_parameters(),
-                            &db_fields_ids_map,
-                            new_fields_ids_map,
-                            primary_key,
-                            &document_changes,
-                            embedders,
-                            &|| must_stop_processing.get(),
-                            progress,
-                        )
-                        .map_err(|e| Error::from_milli(e, Some(index_uid.clone())))?,
-                    );
+                    indexer::index(
+                        index_wtxn,
+                        index,
+                        pool,
+                        indexer_config.grenad_parameters(),
+                        &db_fields_ids_map,
+                        new_fields_ids_map,
+                        primary_key,
+                        &document_changes,
+                        embedders,
+                        &|| must_stop_processing.get(),
+                        &progress,
+                    )
+                    .map_err(|e| Error::from_milli(e, Some(index_uid.clone())))?;

                     let addition = DocumentAdditionResult {
                         indexed_documents: candidates_count,
@@ -201,7 +195,7 @@ impl IndexScheduler {
                     tracing::info!(indexing_result = ?addition, processed_in = ?started_processing_at.elapsed(), "document indexing done");
                 }

-                Ok((tasks, congestion))
+                Ok(tasks)
             }
             IndexOperation::DocumentEdition { index_uid, mut task } => {
                 progress.update_progress(DocumentEditionProgress::RetrievingConfig);
@@ -249,7 +243,7 @@ impl IndexScheduler {
                         edited_documents: Some(0),
                     });

-                    return Ok((vec![task], None));
+                    return Ok(vec![task]);
                 }

                 let rtxn = index.read_txn()?;
@@ -264,7 +258,6 @@ impl IndexScheduler {

                 let result_count = Ok((candidates.len(), candidates.len())) as Result<_>;

-                let mut congestion = None;
                 if task.error.is_none() {
                     let local_pool;
                     let indexer_config = self.index_mapper.indexer_config();
@@ -295,22 +288,20 @@ impl IndexScheduler {
                 let embedders = self.embedders(index_uid.clone(), embedders)?;

                 progress.update_progress(DocumentEditionProgress::Indexing);
-                congestion = Some(
-                    indexer::index(
-                        index_wtxn,
-                        index,
-                        pool,
-                        indexer_config.grenad_parameters(),
-                        &db_fields_ids_map,
-                        new_fields_ids_map,
-                        None, // cannot change primary key in DocumentEdition
-                        &document_changes,
-                        embedders,
-                        &|| must_stop_processing.get(),
-                        progress,
-                    )
-                    .map_err(|err| Error::from_milli(err, Some(index_uid.clone())))?,
-                );
+                indexer::index(
+                    index_wtxn,
+                    index,
+                    pool,
+                    indexer_config.grenad_parameters(),
+                    &db_fields_ids_map,
+                    new_fields_ids_map,
+                    None, // cannot change primary key in DocumentEdition
+                    &document_changes,
+                    embedders,
+                    &|| must_stop_processing.get(),
+                    &progress,
+                )
+                .map_err(|err| Error::from_milli(err, Some(index_uid.clone())))?;

                 let addition = DocumentAdditionResult {
                     indexed_documents: candidates_count,
@@ -346,7 +337,7 @@ impl IndexScheduler {
                     }
                 }

-                Ok((vec![task], congestion))
+                Ok(vec![task])
             }
             IndexOperation::DocumentDeletion { mut tasks, index_uid } => {
                 progress.update_progress(DocumentDeletionProgress::RetrievingConfig);
@@ -413,7 +404,7 @@ impl IndexScheduler {
                 }

                 if to_delete.is_empty() {
-                    return Ok((tasks, None));
+                    return Ok(tasks);
                 }

                 let rtxn = index.read_txn()?;
@@ -427,7 +418,6 @@ impl IndexScheduler {
                     PrimaryKey::new_or_insert(primary_key, &mut new_fields_ids_map)
                         .map_err(|err| Error::from_milli(err.into(), Some(index_uid.clone())))?;

-                let mut congestion = None;
                 if !tasks.iter().all(|res| res.error.is_some()) {
                     let local_pool;
                     let indexer_config = self.index_mapper.indexer_config();
@@ -453,22 +443,20 @@ impl IndexScheduler {
                 let embedders = self.embedders(index_uid.clone(), embedders)?;

                 progress.update_progress(DocumentDeletionProgress::Indexing);
-                congestion = Some(
-                    indexer::index(
-                        index_wtxn,
-                        index,
-                        pool,
-                        indexer_config.grenad_parameters(),
-                        &db_fields_ids_map,
-                        new_fields_ids_map,
-                        None, // document deletion never changes primary key
-                        &document_changes,
-                        embedders,
-                        &|| must_stop_processing.get(),
-                        progress,
-                    )
-                    .map_err(|err| Error::from_milli(err, Some(index_uid.clone())))?,
-                );
+                indexer::index(
+                    index_wtxn,
+                    index,
+                    pool,
+                    indexer_config.grenad_parameters(),
+                    &db_fields_ids_map,
+                    new_fields_ids_map,
+                    None, // document deletion never changes primary key
+                    &document_changes,
+                    embedders,
+                    &|| must_stop_processing.get(),
+                    &progress,
+                )
+                .map_err(|err| Error::from_milli(err, Some(index_uid.clone())))?;

                 let addition = DocumentAdditionResult {
                     indexed_documents: candidates_count,
@@ -480,7 +468,7 @@ impl IndexScheduler {
                     tracing::info!(indexing_result = ?addition, processed_in = ?started_processing_at.elapsed(), "document indexing done");
                 }

-                Ok((tasks, congestion))
+                Ok(tasks)
             }
             IndexOperation::Settings { index_uid, settings, mut tasks } => {
                 progress.update_progress(SettingsProgress::RetrievingAndMergingTheSettings);
@@ -505,7 +493,7 @@ impl IndexScheduler {
                 )
                 .map_err(|err| Error::from_milli(err, Some(index_uid.clone())))?;

-                Ok((tasks, None))
+                Ok(tasks)
             }
             IndexOperation::DocumentClearAndSetting {
                 index_uid,
@@ -513,17 +501,17 @@ impl IndexScheduler {
                 settings,
                 settings_tasks,
             } => {
-                let (mut import_tasks, _congestion) = self.apply_index_operation(
+                let mut import_tasks = self.apply_index_operation(
                     index_wtxn,
                     index,
                     IndexOperation::DocumentClear {
                         index_uid: index_uid.clone(),
                         tasks: cleared_tasks,
                     },
-                    progress,
+                    progress.clone(),
                 )?;

-                let (settings_tasks, _congestion) = self.apply_index_operation(
+                let settings_tasks = self.apply_index_operation(
                     index_wtxn,
                     index,
                     IndexOperation::Settings { index_uid, settings, tasks: settings_tasks },
@@ -532,7 +520,7 @@ impl IndexScheduler {

                 let mut tasks = settings_tasks;
                 tasks.append(&mut import_tasks);
-                Ok((tasks, None))
+                Ok(tasks)
             }
         }
     }
 }

@@ -4,6 +4,7 @@ use std::sync::atomic::Ordering;

 use meilisearch_types::heed::CompactionOption;
 use meilisearch_types::milli::progress::{Progress, VariableNameStep};
+use meilisearch_types::milli::{self};
 use meilisearch_types::tasks::{Status, Task};
 use meilisearch_types::{compression, VERSION_FILE_NAME};

@@ -27,7 +28,7 @@ impl IndexScheduler {

         // 2. Snapshot the index-scheduler LMDB env
         //
-        // When we call copy_to_path, LMDB opens a read transaction by itself,
+        // When we call copy_to_file, LMDB opens a read transaction by itself,
         // we can't provide our own. It is an issue as we would like to know
         // the update files to copy but new ones can be enqueued between the copy
         // of the env and the new transaction we open to retrieve the enqueued tasks.
@@ -41,12 +42,7 @@ impl IndexScheduler {
         progress.update_progress(SnapshotCreationProgress::SnapshotTheIndexScheduler);
         let dst = temp_snapshot_dir.path().join("tasks");
         fs::create_dir_all(&dst)?;
-        let compaction_option = if self.scheduler.experimental_no_snapshot_compaction {
-            CompactionOption::Disabled
-        } else {
-            CompactionOption::Enabled
-        };
-        self.env.copy_to_path(dst.join("data.mdb"), compaction_option)?;
+        self.env.copy_to_file(dst.join("data.mdb"), CompactionOption::Enabled)?;

         // 2.2 Create a read transaction on the index-scheduler
         let rtxn = self.env.read_txn()?;
@@ -85,7 +81,7 @@ impl IndexScheduler {
             let dst = temp_snapshot_dir.path().join("indexes").join(uuid.to_string());
             fs::create_dir_all(&dst)?;
             index
-                .copy_to_path(dst.join("data.mdb"), compaction_option)
+                .copy_to_file(dst.join("data.mdb"), CompactionOption::Enabled)
                 .map_err(|e| Error::from_milli(e, Some(name.to_string())))?;
         }

@@ -95,7 +91,14 @@ impl IndexScheduler {
         progress.update_progress(SnapshotCreationProgress::SnapshotTheApiKeys);
         let dst = temp_snapshot_dir.path().join("auth");
         fs::create_dir_all(&dst)?;
-        self.scheduler.auth_env.copy_to_path(dst.join("data.mdb"), compaction_option)?;
+        // TODO We can't use the open_auth_store_env function here but we should
+        let auth = unsafe {
+            milli::heed::EnvOpenOptions::new()
+                .map_size(1024 * 1024 * 1024) // 1 GiB
+                .max_dbs(2)
+                .open(&self.scheduler.auth_path)
+        }?;
+        auth.copy_to_file(dst.join("data.mdb"), CompactionOption::Enabled)?;

         // 5. Copy and tarball the flat snapshot
         progress.update_progress(SnapshotCreationProgress::CreateTheTarball);

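With only `auth_path` stored on the scheduler, the snapshot code above has to reopen the auth LMDB environment itself before copying it out. A rough standalone sketch of that flow, with the heed env open/copy stubbed out as a plain file copy (paths and names below are illustrative, not the crate's real setup):

use std::fs;
use std::io;
use std::path::Path;

fn snapshot_auth(auth_path: &Path, dst: &Path) -> io::Result<u64> {
    fs::create_dir_all(dst)?;
    // In the diff above, this step is EnvOpenOptions::new().map_size(1 << 30)
    // .max_dbs(2).open(auth_path) followed by copy_to_file with compaction;
    // here it is approximated by a raw copy of the data file.
    fs::copy(auth_path.join("data.mdb"), dst.join("data.mdb"))
}

fn main() {
    match snapshot_auth(Path::new("/tmp/meili-auth"), Path::new("/tmp/snapshot/auth")) {
        Ok(bytes) => println!("copied {bytes} bytes"),
        Err(e) => println!("nothing to snapshot: {e}"),
    }
}
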
@@ -12,14 +12,10 @@ impl IndexScheduler {
         #[cfg(test)]
         self.maybe_fail(crate::test_utils::FailureLocation::ProcessUpgrade)?;

+        enum UpgradeIndex {}
         let indexes = self.index_names()?;

         for (i, uid) in indexes.iter().enumerate() {
-            let must_stop_processing = self.scheduler.must_stop_processing.clone();
-
-            if must_stop_processing.get() {
-                return Err(Error::AbortedTask);
-            }
             progress.update_progress(VariableNameStep::<UpgradeIndex>::new(
                 format!("Upgrading index `{uid}`"),
                 i as u32,
@@ -31,7 +27,6 @@ impl IndexScheduler {
                 &mut index_wtxn,
                 &index,
                 db_version,
-                || must_stop_processing.get(),
                 progress.clone(),
             )
             .map_err(|e| Error::from_milli(e, Some(uid.to_string())))?;
@@ -51,42 +46,4 @@ impl IndexScheduler {

         Ok(())
     }
-
-    pub fn process_rollback(&self, db_version: (u32, u32, u32), progress: &Progress) -> Result<()> {
-        let mut wtxn = self.env.write_txn()?;
-        tracing::info!(?db_version, "roll back index scheduler version");
-        self.version.set_version(&mut wtxn, db_version)?;
-        let db_path = self.scheduler.version_file_path.parent().unwrap();
-        wtxn.commit()?;
-
-        let indexes = self.index_names()?;
-
-        tracing::info!("roll backing all indexes");
-        for (i, uid) in indexes.iter().enumerate() {
-            progress.update_progress(VariableNameStep::<UpgradeIndex>::new(
-                format!("Rollbacking index `{uid}`"),
-                i as u32,
-                indexes.len() as u32,
-            ));
-            let index_schd_rtxn = self.env.read_txn()?;
-
-            let rollback_outcome =
-                self.index_mapper.rollback_index(&index_schd_rtxn, uid, db_version)?;
-            if !rollback_outcome.succeeded() {
-                return Err(crate::Error::RollbackFailed { index: uid.clone(), rollback_outcome });
-            }
-        }
-
-        tracing::info!(?db_path, ?db_version, "roll back version file");
-        meilisearch_types::versioning::create_version_file(
-            db_path,
-            db_version.0,
-            db_version.1,
-            db_version.2,
-        )?;
-
-        Ok(())
-    }
 }
-
-enum UpgradeIndex {}

@@ -1,12 +1,12 @@
 ---
 source: crates/index-scheduler/src/scheduler/test_embedders.rs
 expression: simple_hf_config.embedder_options
+snapshot_kind: text
 ---
 {
   "HuggingFace": {
     "model": "sentence-transformers/all-MiniLM-L6-v2",
     "revision": "e4ce9877abf3edfe10b0d82785e83bdcb973e22e",
-    "distribution": null,
-    "pooling": "useModel"
+    "distribution": null
   }
 }

@@ -1,5 +1,6 @@
 ---
 source: crates/index-scheduler/src/scheduler/test.rs
+snapshot_kind: text
 ---
 ### Autobatching Enabled = true
 ### Processing batch None:
@@ -39,7 +40,7 @@ catto [0,]
 [timestamp] [0,1,]
 ----------------------------------------------------------------------
 ### All Batches:
-0 {uid: 0, details: {"receivedDocuments":1,"indexedDocuments":0,"matchedTasks":1,"canceledTasks":1,"originalFilter":"test_query"}, stats: {"totalNbTasks":2,"status":{"succeeded":1,"canceled":1},"types":{"documentAdditionOrUpdate":1,"taskCancelation":1},"indexUids":{"catto":1}}, stop reason: "created batch containing only task with id 1 of type `taskCancelation` that cannot be batched with any other task.", }
+0 {uid: 0, details: {"receivedDocuments":1,"indexedDocuments":0,"matchedTasks":1,"canceledTasks":1,"originalFilter":"test_query"}, stats: {"totalNbTasks":2,"status":{"succeeded":1,"canceled":1},"types":{"documentAdditionOrUpdate":1,"taskCancelation":1},"indexUids":{"catto":1}}, }
 ----------------------------------------------------------------------
 ### Batch to tasks mapping:
 0 [0,1,]

@@ -1,10 +1,11 @@
 ---
 source: crates/index-scheduler/src/scheduler/test.rs
+snapshot_kind: text
 ---
 ### Autobatching Enabled = true
 ### Processing batch Some(1):
 [1,]
-{uid: 1, details: {"receivedDocuments":1,"indexedDocuments":null}, stats: {"totalNbTasks":1,"status":{"processing":1},"types":{"documentAdditionOrUpdate":1},"indexUids":{"beavero":1}}, stop reason: "batched all enqueued tasks for index `beavero`", }
+{uid: 1, details: {"receivedDocuments":1,"indexedDocuments":null}, stats: {"totalNbTasks":1,"status":{"processing":1},"types":{"documentAdditionOrUpdate":1},"indexUids":{"beavero":1}}, }
 ----------------------------------------------------------------------
 ### All Tasks:
 0 {uid: 0, batch_uid: 0, status: succeeded, details: { received_documents: 1, indexed_documents: Some(1) }, kind: DocumentAdditionOrUpdate { index_uid: "catto", primary_key: None, method: ReplaceDocuments, content_file: 00000000-0000-0000-0000-000000000000, documents_count: 1, allow_index_creation: true }}
@@ -46,7 +47,7 @@ catto: { number_of_documents: 1, field_distribution: {"id": 1} }
 [timestamp] [0,]
 ----------------------------------------------------------------------
 ### All Batches:
-0 {uid: 0, details: {"receivedDocuments":1,"indexedDocuments":1}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"documentAdditionOrUpdate":1},"indexUids":{"catto":1}}, stop reason: "batched all enqueued tasks for index `catto`", }
+0 {uid: 0, details: {"receivedDocuments":1,"indexedDocuments":1}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"documentAdditionOrUpdate":1},"indexUids":{"catto":1}}, }
 ----------------------------------------------------------------------
 ### Batch to tasks mapping:
 0 [0,]

@@ -1,5 +1,6 @@
 ---
 source: crates/index-scheduler/src/scheduler/test.rs
+snapshot_kind: text
 ---
 ### Autobatching Enabled = true
 ### Processing batch None:
@@ -49,8 +50,8 @@ catto: { number_of_documents: 1, field_distribution: {"id": 1} }
 [timestamp] [1,2,3,]
 ----------------------------------------------------------------------
 ### All Batches:
-0 {uid: 0, details: {"receivedDocuments":1,"indexedDocuments":1}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"documentAdditionOrUpdate":1},"indexUids":{"catto":1}}, stop reason: "batched all enqueued tasks for index `catto`", }
-1 {uid: 1, details: {"receivedDocuments":2,"indexedDocuments":0,"matchedTasks":3,"canceledTasks":2,"originalFilter":"test_query"}, stats: {"totalNbTasks":3,"status":{"succeeded":1,"canceled":2},"types":{"documentAdditionOrUpdate":2,"taskCancelation":1},"indexUids":{"beavero":1,"wolfo":1}}, stop reason: "created batch containing only task with id 3 of type `taskCancelation` that cannot be batched with any other task.", }
+0 {uid: 0, details: {"receivedDocuments":1,"indexedDocuments":1}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"documentAdditionOrUpdate":1},"indexUids":{"catto":1}}, }
+1 {uid: 1, details: {"receivedDocuments":2,"indexedDocuments":0,"matchedTasks":3,"canceledTasks":2,"originalFilter":"test_query"}, stats: {"totalNbTasks":3,"status":{"succeeded":1,"canceled":2},"types":{"documentAdditionOrUpdate":2,"taskCancelation":1},"indexUids":{"beavero":1,"wolfo":1}}, }
 ----------------------------------------------------------------------
 ### Batch to tasks mapping:
 0 [0,]

@@ -1,5 +1,6 @@
 ---
 source: crates/index-scheduler/src/scheduler/test.rs
+snapshot_kind: text
 ---
 ### Autobatching Enabled = true
 ### Processing batch None:
@@ -41,7 +42,7 @@ catto: { number_of_documents: 1, field_distribution: {"id": 1} }
 [timestamp] [0,]
 ----------------------------------------------------------------------
 ### All Batches:
-0 {uid: 0, details: {"receivedDocuments":1,"indexedDocuments":1}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"documentAdditionOrUpdate":1},"indexUids":{"catto":1}}, stop reason: "batched all enqueued tasks for index `catto`", }
+0 {uid: 0, details: {"receivedDocuments":1,"indexedDocuments":1}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"documentAdditionOrUpdate":1},"indexUids":{"catto":1}}, }
 ----------------------------------------------------------------------
 ### Batch to tasks mapping:
 0 [0,]

@ -1,10 +1,11 @@
|
|||||||
---
|
---
|
||||||
source: crates/index-scheduler/src/scheduler/test.rs
|
source: crates/index-scheduler/src/scheduler/test.rs
|
||||||
|
snapshot_kind: text
|
||||||
---
|
---
|
||||||
### Autobatching Enabled = true
|
### Autobatching Enabled = true
|
||||||
### Processing batch Some(1):
|
### Processing batch Some(1):
|
||||||
[1,]
|
[1,]
|
||||||
{uid: 1, details: {"receivedDocuments":1,"indexedDocuments":null}, stats: {"totalNbTasks":1,"status":{"processing":1},"types":{"documentAdditionOrUpdate":1},"indexUids":{"beavero":1}}, stop reason: "batched all enqueued tasks for index `beavero`", }
|
{uid: 1, details: {"receivedDocuments":1,"indexedDocuments":null}, stats: {"totalNbTasks":1,"status":{"processing":1},"types":{"documentAdditionOrUpdate":1},"indexUids":{"beavero":1}}, }
|
||||||
----------------------------------------------------------------------
|
----------------------------------------------------------------------
|
||||||
### All Tasks:
|
### All Tasks:
|
||||||
0 {uid: 0, batch_uid: 0, status: succeeded, details: { received_documents: 1, indexed_documents: Some(1) }, kind: DocumentAdditionOrUpdate { index_uid: "catto", primary_key: None, method: ReplaceDocuments, content_file: 00000000-0000-0000-0000-000000000000, documents_count: 1, allow_index_creation: true }}
|
0 {uid: 0, batch_uid: 0, status: succeeded, details: { received_documents: 1, indexed_documents: Some(1) }, kind: DocumentAdditionOrUpdate { index_uid: "catto", primary_key: None, method: ReplaceDocuments, content_file: 00000000-0000-0000-0000-000000000000, documents_count: 1, allow_index_creation: true }}
|
||||||
@ -45,7 +46,7 @@ catto: { number_of_documents: 1, field_distribution: {"id": 1} }
|
|||||||
[timestamp] [0,]
|
[timestamp] [0,]
|
||||||
----------------------------------------------------------------------
|
----------------------------------------------------------------------
|
||||||
### All Batches:
|
### All Batches:
|
||||||
0 {uid: 0, details: {"receivedDocuments":1,"indexedDocuments":1}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"documentAdditionOrUpdate":1},"indexUids":{"catto":1}}, stop reason: "batched all enqueued tasks for index `catto`", }
|
0 {uid: 0, details: {"receivedDocuments":1,"indexedDocuments":1}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"documentAdditionOrUpdate":1},"indexUids":{"catto":1}}, }
|
||||||
----------------------------------------------------------------------
|
----------------------------------------------------------------------
|
||||||
### Batch to tasks mapping:
|
### Batch to tasks mapping:
|
||||||
0 [0,]
|
0 [0,]
|
||||||
@@ -1,5 +1,6 @@
 ---
 source: crates/index-scheduler/src/scheduler/test.rs
+snapshot_kind: text
 ---
 ### Autobatching Enabled = true
 ### Processing batch None:
@@ -38,7 +39,7 @@ canceled [0,]
 [timestamp] [0,1,]
 ----------------------------------------------------------------------
 ### All Batches:
-0 {uid: 0, details: {"matchedTasks":1,"canceledTasks":1,"originalFilter":"cancel dump"}, stats: {"totalNbTasks":2,"status":{"succeeded":1,"canceled":1},"types":{"taskCancelation":1,"dumpCreation":1},"indexUids":{}}, stop reason: "created batch containing only task with id 1 of type `taskCancelation` that cannot be batched with any other task.", }
+0 {uid: 0, details: {"matchedTasks":1,"canceledTasks":1,"originalFilter":"cancel dump"}, stats: {"totalNbTasks":2,"status":{"succeeded":1,"canceled":1},"types":{"taskCancelation":1,"dumpCreation":1},"indexUids":{}}, }
 ----------------------------------------------------------------------
 ### Batch to tasks mapping:
 0 [0,1,]

@@ -1,10 +1,11 @@
 ---
 source: crates/index-scheduler/src/scheduler/test.rs
+snapshot_kind: text
 ---
 ### Autobatching Enabled = true
 ### Processing batch Some(0):
 [0,]
-{uid: 0, details: {"dumpUid":null}, stats: {"totalNbTasks":1,"status":{"processing":1},"types":{"dumpCreation":1},"indexUids":{}}, stop reason: "created batch containing only task with id 0 of type `dumpCreation` that cannot be batched with any other task.", }
+{uid: 0, details: {"dumpUid":null}, stats: {"totalNbTasks":1,"status":{"processing":1},"types":{"dumpCreation":1},"indexUids":{}}, }
 ----------------------------------------------------------------------
 ### All Tasks:
 0 {uid: 0, status: enqueued, details: { dump_uid: None }, kind: DumpCreation { keys: [], instance_uid: None }}

@@ -1,10 +1,11 @@
 ---
 source: crates/index-scheduler/src/scheduler/test.rs
+snapshot_kind: text
 ---
 ### Autobatching Enabled = true
 ### Processing batch Some(0):
 [0,]
-{uid: 0, details: {"receivedDocuments":1,"indexedDocuments":null}, stats: {"totalNbTasks":1,"status":{"processing":1},"types":{"documentAdditionOrUpdate":1},"indexUids":{"catto":1}}, stop reason: "batched all enqueued tasks", }
+{uid: 0, details: {"receivedDocuments":1,"indexedDocuments":null}, stats: {"totalNbTasks":1,"status":{"processing":1},"types":{"documentAdditionOrUpdate":1},"indexUids":{"catto":1}}, }
 ----------------------------------------------------------------------
 ### All Tasks:
 0 {uid: 0, status: enqueued, details: { received_documents: 1, indexed_documents: None }, kind: DocumentAdditionOrUpdate { index_uid: "catto", primary_key: None, method: ReplaceDocuments, content_file: 00000000-0000-0000-0000-000000000000, documents_count: 1, allow_index_creation: true }}

@@ -1,5 +1,6 @@
 ---
 source: crates/index-scheduler/src/scheduler/test.rs
+snapshot_kind: text
 ---
 ### Autobatching Enabled = true
 ### Processing batch None:
@@ -40,7 +41,7 @@ catto: { number_of_documents: 0, field_distribution: {} }
 [timestamp] [0,1,]
 ----------------------------------------------------------------------
 ### All Batches:
-0 {uid: 0, details: {"receivedDocuments":1,"indexedDocuments":0,"matchedTasks":1,"canceledTasks":1,"originalFilter":"test_query"}, stats: {"totalNbTasks":2,"status":{"succeeded":1,"canceled":1},"types":{"documentAdditionOrUpdate":1,"taskCancelation":1},"indexUids":{"catto":1}}, stop reason: "created batch containing only task with id 1 of type `taskCancelation` that cannot be batched with any other task.", }
+0 {uid: 0, details: {"receivedDocuments":1,"indexedDocuments":0,"matchedTasks":1,"canceledTasks":1,"originalFilter":"test_query"}, stats: {"totalNbTasks":2,"status":{"succeeded":1,"canceled":1},"types":{"documentAdditionOrUpdate":1,"taskCancelation":1},"indexUids":{"catto":1}}, }
 ----------------------------------------------------------------------
 ### Batch to tasks mapping:
 0 [0,1,]

@@ -1,10 +1,11 @@
 ---
 source: crates/index-scheduler/src/scheduler/test.rs
+snapshot_kind: text
 ---
 ### Autobatching Enabled = true
 ### Processing batch Some(0):
 [0,]
-{uid: 0, details: {"receivedDocuments":1,"indexedDocuments":null}, stats: {"totalNbTasks":1,"status":{"processing":1},"types":{"documentAdditionOrUpdate":1},"indexUids":{"catto":1}}, stop reason: "batched all enqueued tasks", }
+{uid: 0, details: {"receivedDocuments":1,"indexedDocuments":null}, stats: {"totalNbTasks":1,"status":{"processing":1},"types":{"documentAdditionOrUpdate":1},"indexUids":{"catto":1}}, }
 ----------------------------------------------------------------------
 ### All Tasks:
 0 {uid: 0, status: enqueued, details: { received_documents: 1, indexed_documents: None }, kind: DocumentAdditionOrUpdate { index_uid: "catto", primary_key: None, method: ReplaceDocuments, content_file: 00000000-0000-0000-0000-000000000000, documents_count: 1, allow_index_creation: true }}

@@ -1,10 +1,11 @@
 ---
 source: crates/index-scheduler/src/scheduler/test.rs
+snapshot_kind: text
 ---
 ### Autobatching Enabled = true
 ### Processing batch Some(0):
 [0,]
-{uid: 0, details: {"receivedDocuments":1,"indexedDocuments":null}, stats: {"totalNbTasks":1,"status":{"processing":1},"types":{"documentAdditionOrUpdate":1},"indexUids":{"catto":1}}, stop reason: "batched all enqueued tasks", }
+{uid: 0, details: {"receivedDocuments":1,"indexedDocuments":null}, stats: {"totalNbTasks":1,"status":{"processing":1},"types":{"documentAdditionOrUpdate":1},"indexUids":{"catto":1}}, }
 ----------------------------------------------------------------------
 ### All Tasks:
 0 {uid: 0, status: enqueued, details: { received_documents: 1, indexed_documents: None }, kind: DocumentAdditionOrUpdate { index_uid: "catto", primary_key: None, method: ReplaceDocuments, content_file: 00000000-0000-0000-0000-000000000000, documents_count: 1, allow_index_creation: true }}

@@ -1,5 +1,6 @@
 ---
 source: crates/index-scheduler/src/scheduler/test.rs
+snapshot_kind: text
 ---
 ### Autobatching Enabled = true
 ### Processing batch None:
@@ -40,8 +41,8 @@ catto: { number_of_documents: 1, field_distribution: {"id": 1} }
 [timestamp] [1,]
 ----------------------------------------------------------------------
 ### All Batches:
-0 {uid: 0, details: {"receivedDocuments":1,"indexedDocuments":1}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"documentAdditionOrUpdate":1},"indexUids":{"catto":1}}, stop reason: "batched all enqueued tasks", }
-1 {uid: 1, details: {"matchedTasks":1,"canceledTasks":0,"originalFilter":"test_query"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"taskCancelation":1},"indexUids":{}}, stop reason: "created batch containing only task with id 1 of type `taskCancelation` that cannot be batched with any other task.", }
+0 {uid: 0, details: {"receivedDocuments":1,"indexedDocuments":1}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"documentAdditionOrUpdate":1},"indexUids":{"catto":1}}, }
+1 {uid: 1, details: {"matchedTasks":1,"canceledTasks":0,"originalFilter":"test_query"}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"taskCancelation":1},"indexUids":{}}, }
 ----------------------------------------------------------------------
 ### Batch to tasks mapping:
 0 [0,]

@@ -1,5 +1,6 @@
 ---
 source: crates/index-scheduler/src/scheduler/test.rs
+snapshot_kind: text
 ---
 ### Autobatching Enabled = true
 ### Processing batch None:
@@ -35,7 +36,7 @@ catto: { number_of_documents: 1, field_distribution: {"id": 1} }
 [timestamp] [0,]
 ----------------------------------------------------------------------
 ### All Batches:
-0 {uid: 0, details: {"receivedDocuments":1,"indexedDocuments":1}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"documentAdditionOrUpdate":1},"indexUids":{"catto":1}}, stop reason: "batched all enqueued tasks", }
+0 {uid: 0, details: {"receivedDocuments":1,"indexedDocuments":1}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"documentAdditionOrUpdate":1},"indexUids":{"catto":1}}, }
 ----------------------------------------------------------------------
 ### Batch to tasks mapping:
 0 [0,]

@@ -1,5 +1,6 @@
 ---
 source: crates/index-scheduler/src/scheduler/test.rs
+snapshot_kind: text
 ---
 ### Autobatching Enabled = true
 ### Processing batch None:
@@ -60,12 +61,12 @@ girafos: { number_of_documents: 0, field_distribution: {} }
 [timestamp] [5,]
 ----------------------------------------------------------------------
 ### All Batches:
-0 {uid: 0, details: {}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"indexCreation":1},"indexUids":{"doggos":1}}, stop reason: "created batch containing only task with id 0 of type `indexCreation` that cannot be batched with any other task.", }
-1 {uid: 1, details: {}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"indexCreation":1},"indexUids":{"cattos":1}}, stop reason: "created batch containing only task with id 1 of type `indexCreation` that cannot be batched with any other task.", }
-2 {uid: 2, details: {}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"indexCreation":1},"indexUids":{"girafos":1}}, stop reason: "created batch containing only task with id 2 of type `indexCreation` that cannot be batched with any other task.", }
-3 {uid: 3, details: {"deletedDocuments":0}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"documentDeletion":1},"indexUids":{"doggos":1}}, stop reason: "batched all enqueued tasks for index `doggos`", }
-4 {uid: 4, details: {"deletedDocuments":0}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"documentDeletion":1},"indexUids":{"cattos":1}}, stop reason: "batched all enqueued tasks for index `cattos`", }
-5 {uid: 5, details: {"deletedDocuments":0}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"documentDeletion":1},"indexUids":{"girafos":1}}, stop reason: "batched all enqueued tasks", }
+0 {uid: 0, details: {}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"indexCreation":1},"indexUids":{"doggos":1}}, }
+1 {uid: 1, details: {}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"indexCreation":1},"indexUids":{"cattos":1}}, }
+2 {uid: 2, details: {}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"indexCreation":1},"indexUids":{"girafos":1}}, }
+3 {uid: 3, details: {"deletedDocuments":0}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"documentDeletion":1},"indexUids":{"doggos":1}}, }
+4 {uid: 4, details: {"deletedDocuments":0}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"documentDeletion":1},"indexUids":{"cattos":1}}, }
+5 {uid: 5, details: {"deletedDocuments":0}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"documentDeletion":1},"indexUids":{"girafos":1}}, }
 ----------------------------------------------------------------------
 ### Batch to tasks mapping:
 0 [0,]

@@ -1,5 +1,6 @@
 ---
 source: crates/index-scheduler/src/scheduler/test.rs
+snapshot_kind: text
 ---
 ### Autobatching Enabled = true
 ### Processing batch None:
@@ -41,7 +42,7 @@ doggos: { number_of_documents: 0, field_distribution: {} }
 [timestamp] [0,]
 ----------------------------------------------------------------------
 ### All Batches:
-0 {uid: 0, details: {}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"indexCreation":1},"indexUids":{"doggos":1}}, stop reason: "created batch containing only task with id 0 of type `indexCreation` that cannot be batched with any other task.", }
+0 {uid: 0, details: {}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"indexCreation":1},"indexUids":{"doggos":1}}, }
 ----------------------------------------------------------------------
 ### Batch to tasks mapping:
 0 [0,]

@@ -1,5 +1,6 @@
 ---
 source: crates/index-scheduler/src/scheduler/test.rs
+snapshot_kind: text
 ---
 ### Autobatching Enabled = true
 ### Processing batch None:
@@ -42,8 +43,8 @@ doggos [0,1,2,]
 [timestamp] [1,2,]
 ----------------------------------------------------------------------
 ### All Batches:
-0 {uid: 0, details: {}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"indexCreation":1},"indexUids":{"doggos":1}}, stop reason: "created batch containing only task with id 0 of type `indexCreation` that cannot be batched with any other task.", }
-1 {uid: 1, details: {"receivedDocuments":1,"indexedDocuments":0,"deletedDocuments":0}, stats: {"totalNbTasks":2,"status":{"succeeded":2},"types":{"documentAdditionOrUpdate":1,"indexDeletion":1},"indexUids":{"doggos":2}}, stop reason: "stopped after task with id 2 because it deletes the index", }
+0 {uid: 0, details: {}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"indexCreation":1},"indexUids":{"doggos":1}}, }
+1 {uid: 1, details: {"receivedDocuments":1,"indexedDocuments":0,"deletedDocuments":0}, stats: {"totalNbTasks":2,"status":{"succeeded":2},"types":{"documentAdditionOrUpdate":1,"indexDeletion":1},"indexUids":{"doggos":2}}, }
 ----------------------------------------------------------------------
 ### Batch to tasks mapping:
 0 [0,]

@@ -1,5 +1,6 @@
 ---
 source: crates/index-scheduler/src/scheduler/test.rs
+snapshot_kind: text
 ---
 ### Autobatching Enabled = true
 ### Processing batch None:
@@ -37,7 +38,7 @@ doggos [0,1,]
 [timestamp] [0,1,]
 ----------------------------------------------------------------------
 ### All Batches:
-0 {uid: 0, details: {"receivedDocuments":1,"indexedDocuments":0,"deletedDocuments":0}, stats: {"totalNbTasks":2,"status":{"succeeded":2},"types":{"documentAdditionOrUpdate":1,"indexDeletion":1},"indexUids":{"doggos":2}}, stop reason: "stopped after task with id 1 because it deletes the index", }
+0 {uid: 0, details: {"receivedDocuments":1,"indexedDocuments":0,"deletedDocuments":0}, stats: {"totalNbTasks":2,"status":{"succeeded":2},"types":{"documentAdditionOrUpdate":1,"indexDeletion":1},"indexUids":{"doggos":2}}, }
 ----------------------------------------------------------------------
 ### Batch to tasks mapping:
 0 [0,1,]

@@ -1,10 +1,11 @@
 ---
 source: crates/index-scheduler/src/scheduler/test.rs
+snapshot_kind: text
 ---
 ### Autobatching Enabled = true
 ### Processing batch Some(0):
 [0,]
-{uid: 0, details: {"primaryKey":"id"}, stats: {"totalNbTasks":1,"status":{"processing":1},"types":{"indexCreation":1},"indexUids":{"index_a":1}}, stop reason: "created batch containing only task with id 0 of type `indexCreation` that cannot be batched with any other task.", }
+{uid: 0, details: {"primaryKey":"id"}, stats: {"totalNbTasks":1,"status":{"processing":1},"types":{"indexCreation":1},"indexUids":{"index_a":1}}, }
 ----------------------------------------------------------------------
 ### All Tasks:
 0 {uid: 0, status: enqueued, details: { primary_key: Some("id") }, kind: IndexCreation { index_uid: "index_a", primary_key: Some("id") }}

@@ -1,10 +1,11 @@
 ---
 source: crates/index-scheduler/src/scheduler/test.rs
+snapshot_kind: text
 ---
 ### Autobatching Enabled = true
 ### Processing batch Some(0):
 [0,]
-{uid: 0, details: {"primaryKey":"id"}, stats: {"totalNbTasks":1,"status":{"processing":1},"types":{"indexCreation":1},"indexUids":{"index_a":1}}, stop reason: "created batch containing only task with id 0 of type `indexCreation` that cannot be batched with any other task.", }
+{uid: 0, details: {"primaryKey":"id"}, stats: {"totalNbTasks":1,"status":{"processing":1},"types":{"indexCreation":1},"indexUids":{"index_a":1}}, }
 ----------------------------------------------------------------------
 ### All Tasks:
 0 {uid: 0, status: enqueued, details: { primary_key: Some("id") }, kind: IndexCreation { index_uid: "index_a", primary_key: Some("id") }}

@@ -1,10 +1,11 @@
 ---
 source: crates/index-scheduler/src/scheduler/test.rs
+snapshot_kind: text
 ---
 ### Autobatching Enabled = true
 ### Processing batch Some(0):
 [0,]
-{uid: 0, details: {"primaryKey":"id"}, stats: {"totalNbTasks":1,"status":{"processing":1},"types":{"indexCreation":1},"indexUids":{"index_a":1}}, stop reason: "created batch containing only task with id 0 of type `indexCreation` that cannot be batched with any other task.", }
+{uid: 0, details: {"primaryKey":"id"}, stats: {"totalNbTasks":1,"status":{"processing":1},"types":{"indexCreation":1},"indexUids":{"index_a":1}}, }
 ----------------------------------------------------------------------
 ### All Tasks:
 0 {uid: 0, status: enqueued, details: { primary_key: Some("id") }, kind: IndexCreation { index_uid: "index_a", primary_key: Some("id") }}

@@ -1,5 +1,6 @@
 ---
 source: crates/index-scheduler/src/scheduler/test.rs
+snapshot_kind: text
 ---
 ### Autobatching Enabled = true
 ### Processing batch None:
@@ -41,7 +42,7 @@ doggos: { number_of_documents: 0, field_distribution: {} }
 [timestamp] [0,]
 ----------------------------------------------------------------------
 ### All Batches:
-0 {uid: 0, details: {}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"indexCreation":1},"indexUids":{"doggos":1}}, stop reason: "created batch containing only task with id 0 of type `indexCreation` that cannot be batched with any other task.", }
+0 {uid: 0, details: {}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"indexCreation":1},"indexUids":{"doggos":1}}, }
 ----------------------------------------------------------------------
 ### Batch to tasks mapping:
 0 [0,]

@@ -1,5 +1,6 @@
 ---
 source: crates/index-scheduler/src/scheduler/test.rs
+snapshot_kind: text
 ---
 ### Autobatching Enabled = true
 ### Processing batch None:
@@ -44,8 +45,8 @@ doggos: { number_of_documents: 0, field_distribution: {} }
 [timestamp] [1,]
 ----------------------------------------------------------------------
 ### All Batches:
-0 {uid: 0, details: {}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"indexCreation":1},"indexUids":{"doggos":1}}, stop reason: "created batch containing only task with id 0 of type `indexCreation` that cannot be batched with any other task.", }
-1 {uid: 1, details: {}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"indexCreation":1},"indexUids":{"cattos":1}}, stop reason: "created batch containing only task with id 1 of type `indexCreation` that cannot be batched with any other task.", }
+0 {uid: 0, details: {}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"indexCreation":1},"indexUids":{"doggos":1}}, }
+1 {uid: 1, details: {}, stats: {"totalNbTasks":1,"status":{"succeeded":1},"types":{"indexCreation":1},"indexUids":{"cattos":1}}, }
 ----------------------------------------------------------------------
 ### Batch to tasks mapping:
 0 [0,]

Some files were not shown because too many files have changed in this diff.