Mirror of https://github.com/meilisearch/meilisearch.git, synced 2025-12-01 10:15:50 +00:00
Compare commits: 6 commits (prototype-...prototype-)
| Author | SHA1 | Date |
|---|---|---|
| | 56fd9637cd | |
| | 1bb8ace177 | |
| | e343f9a337 | |
| | 0725e51335 | |
| | 0bf35c7cc6 | |
| | 541d38d9d4 | |
10 .github/scripts/check-release.sh vendored
@@ -3,7 +3,7 @@
 # check_tag $current_tag $file_tag $file_name
 function check_tag {
     if [[ "$1" != "$2" ]]; then
-        echo "Error: the current tag does not match the version in Cargo.toml: found $2 - expected $1"
+        echo "Error: the current tag does not match the version in $3: found $2 - expected $1"
         ret=1
     fi
 }
@@ -11,8 +11,12 @@ function check_tag {
 ret=0
 current_tag=${GITHUB_REF#'refs/tags/v'}

-file_tag="$(grep '^version = ' Cargo.toml | cut -d '=' -f 2 | tr -d '"' | tr -d ' ')"
-check_tag $current_tag $file_tag
+toml_files='*/Cargo.toml'
+for toml_file in $toml_files;
+do
+    file_tag="$(grep '^version = ' $toml_file | cut -d '=' -f 2 | tr -d '"' | tr -d ' ')"
+    check_tag $current_tag $file_tag $toml_file
+done

 lock_file='Cargo.lock'
 lock_tag=$(grep -A 1 'name = "meilisearch-auth"' $lock_file | grep version | cut -d '=' -f 2 | tr -d '"' | tr -d ' ')
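For reference, the loop-based check is easy to reproduce outside CI. A minimal, self-contained sketch; the hard-coded `current_tag` stands in for the `${GITHUB_REF#'refs/tags/v'}` value that CI would provide:

```bash
#!/usr/bin/env bash
ret=0

# Same three-argument helper as in the script above:
# expected tag, version found, and the file it came from.
function check_tag {
    if [[ "$1" != "$2" ]]; then
        echo "Error: the current tag does not match the version in $3: found $2 - expected $1"
        ret=1
    fi
}

current_tag="1.0.0"  # in CI: ${GITHUB_REF#'refs/tags/v'}
for toml_file in */Cargo.toml; do
    file_tag="$(grep '^version = ' "$toml_file" | cut -d '=' -f 2 | tr -d '"' | tr -d ' ')"
    check_tag "$current_tag" "$file_tag" "$toml_file"
done
exit $ret
```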
30 .github/uffizzi/Dockerfile vendored
@@ -1,3 +1,23 @@
+# Compile
+FROM rust:alpine3.16 AS compiler
+
+RUN apk add -q --update-cache --no-cache build-base openssl-dev
+
+WORKDIR /meilisearch
+
+ARG COMMIT_SHA
+ARG COMMIT_DATE
+ENV COMMIT_SHA=${COMMIT_SHA} COMMIT_DATE=${COMMIT_DATE}
+ENV RUSTFLAGS="-C target-feature=-crt-static"
+
+COPY . .
+RUN set -eux; \
+    apkArch="$(apk --print-arch)"; \
+    if [ "$apkArch" = "aarch64" ]; then \
+        export JEMALLOC_SYS_WITH_LG_PAGE=16; \
+    fi && \
+    cargo build --release
+
+# Run
 FROM uffizzi/ttyd:alpine
@@ -8,11 +28,19 @@ ENV MEILI_NO_ANALYTICS true
 RUN apk update --quiet \
     && apk add -q --no-cache libgcc tini curl

-COPY target/x86_64-unknown-linux-musl/release/meilisearch /bin/meilisearch
+# add meilisearch to the `/bin` so you can run it from anywhere and it's easy
+# to find.
+COPY --from=compiler /meilisearch/target/release/meilisearch /bin/meilisearch
+# To stay compatible with the older version of the container (pre v0.27.0) we're
+# going to symlink the meilisearch binary in the path to `/meilisearch`
+RUN ln -s /bin/meilisearch /meilisearch

+# This directory should hold all the data related to meilisearch so we're going
+# to move our PWD in there.
+# We don't want to put the meilisearch binary
 WORKDIR /meili_data


 EXPOSE 7700/tcp

 ENTRYPOINT ["tini", "--"]
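To exercise this multi-stage image locally, a build along these lines should work; the image tag is illustrative, not part of the workflow:

```bash
docker build \
  --build-arg COMMIT_SHA="$(git rev-parse HEAD)" \
  --build-arg COMMIT_DATE="$(git show -s --format=%cI HEAD)" \
  -f .github/uffizzi/Dockerfile \
  -t meilisearch-uffizzi:local .   # hypothetical tag name
```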
40 .github/workflows/publish-binaries.yml vendored
@@ -96,12 +96,14 @@ jobs:

   publish-macos-apple-silicon:
     name: Publish binary for macOS silicon
-    runs-on: macos-12
+    runs-on: ${{ matrix.os }}
     needs: check-version
     strategy:
       fail-fast: false
       matrix:
         include:
-          - target: aarch64-apple-darwin
+          - os: macos-12
+            target: aarch64-apple-darwin
             asset_name: meilisearch-macos-apple-silicon
     steps:
       - name: Checkout repository
@@ -130,29 +132,21 @@ jobs:

   publish-aarch64:
     name: Publish binary for aarch64
-    runs-on: ubuntu-latest
+    runs-on: ${{ matrix.os }}
     needs: check-version
-    container:
-      # Use ubuntu-18.04 to compile with glibc 2.27
-      image: ubuntu:18.04
     strategy:
       fail-fast: false
       matrix:
         include:
-          - target: aarch64-unknown-linux-gnu
+          - build: aarch64
+            os: ubuntu-18.04
+            target: aarch64-unknown-linux-gnu
+            linker: gcc-aarch64-linux-gnu
+            use-cross: true
             asset_name: meilisearch-linux-aarch64
     steps:
       - name: Checkout repository
         uses: actions/checkout@v3
-      - name: Install needed dependencies
-        run: |
-          apt-get update -y && apt upgrade -y
-          apt-get install -y curl build-essential gcc-aarch64-linux-gnu
-      - name: Set up Docker for cross compilation
-        run: |
-          apt-get install -y curl apt-transport-https ca-certificates software-properties-common
-          curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add -
-          add-apt-repository "deb [arch=$(dpkg --print-architecture)] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable"
-          apt-get update -y && apt-get install -y docker-ce
       - name: Installing Rust toolchain
         uses: actions-rs/toolchain@v1
         with:
@@ -160,7 +154,15 @@ jobs:
           profile: minimal
           target: ${{ matrix.target }}
           override: true
+      - name: APT update
+        run: |
+          sudo apt update
+      - name: Install target specific tools
+        if: matrix.use-cross
+        run: |
+          sudo apt-get install -y ${{ matrix.linker }}
       - name: Configure target aarch64 GNU
+        if: matrix.target == 'aarch64-unknown-linux-gnu'
        ## Environment variable is not passed using env:
        ## LD gold won't work with MUSL
        # env:
@@ -174,10 +176,8 @@ jobs:
         uses: actions-rs/cargo@v1
         with:
           command: build
-          use-cross: true
+          use-cross: ${{ matrix.use-cross }}
           args: --release --target ${{ matrix.target }}
-        env:
-          CROSS_DOCKER_IN_DOCKER: true
       - name: List target output files
         run: ls -lR ./target
       - name: Upload the binary to release
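The matrix entry above (`use-cross: true` plus a `gcc-aarch64-linux-gnu` linker) boils down to a `cross` invocation. A rough local equivalent, assuming Docker and a Rust toolchain are installed:

```bash
# Cross-compile the release binary for aarch64, as the workflow's build step does.
rustup target add aarch64-unknown-linux-gnu
cargo install cross
cross build --release --target aarch64-unknown-linux-gnu
ls -lR ./target   # the binary lands under target/aarch64-unknown-linux-gnu/release
```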
1 .github/workflows/publish-docker-images.yml vendored
@@ -92,7 +92,6 @@ jobs:
           build-args: |
             COMMIT_SHA=${{ github.sha }}
             COMMIT_DATE=${{ steps.build-metadata.outputs.date }}
-            GIT_TAG=${{ github.ref_name }}

   # /!\ Don't touch this without checking with Cloud team
   - name: Send CI information to Cloud team
13 .github/workflows/rust.yml vendored
@@ -2,9 +2,6 @@ name: Rust

 on:
   workflow_dispatch:
-  schedule:
-    # Everyday at 5:00am
-    - cron: '0 5 * * *'
   pull_request:
   push:
     # trying and staging branches are for Bors config
@@ -30,18 +27,10 @@ jobs:
        run: |
          apt-get update && apt-get install -y curl
          apt-get install build-essential -y
-      - name: Run test with Rust stable
-        if: github.event_name != 'schedule'
-        uses: actions-rs/toolchain@v1
+      - uses: actions-rs/toolchain@v1
        with:
          toolchain: stable
          override: true
-      - name: Run test with Rust nightly
-        if: github.event_name == 'schedule'
-        uses: actions-rs/toolchain@v1
-        with:
-          toolchain: nightly
-          override: true
      # Disable cache due to disk space issues with Windows workers in CI
      # - name: Cache dependencies
      #   uses: Swatinem/rust-cache@v2.2.0
20 .github/workflows/uffizzi-build.yml vendored
@@ -14,26 +14,6 @@ jobs:
       - name: checkout
         uses: actions/checkout@v3

-      - run: sudo apt-get install musl-tools
-
-      - uses: actions-rs/toolchain@v1
-        with:
-          toolchain: stable
-          override: true
-          target: x86_64-unknown-linux-musl
-
-      - name: Cache dependencies
-        uses: Swatinem/rust-cache@v2.2.0
-
-      - name: Run cargo check without any default features
-        uses: actions-rs/cargo@v1
-        with:
-          command: build
-          args: --target x86_64-unknown-linux-musl --release
-
-      - name: Remove dockerignore so we can use the target folder in our docker build
-        run: rm -f .dockerignore
-
       - name: Set up QEMU
         uses: docker/setup-qemu-action@v2
6 .github/workflows/uffizzi-preview-deploy.yml vendored
@@ -82,7 +82,7 @@ jobs:
     name: Use Remote Workflow to Preview on Uffizzi
     needs:
       - cache-compose-file
-    uses: UffizziCloud/preview-action/.github/workflows/reusable.yaml@v2
+    uses: UffizziCloud/preview-action/.github/workflows/reusable.yaml@desc
     with:
       # If this workflow was triggered by a PR close event, cache-key will be an empty string
       # and this reusable workflow will delete the preview deployment.
@@ -95,8 +95,8 @@ jobs:
         `meilisearch` command. You should be able to access this instance of meilisearch running in
         the preview from the link Meilisearch Endpoint link given below.

-        Web Terminal Endpoint : <uffizzi-url>
-        Meilisearch Endpoint : <uffizzi-url>/meilisearch
+        Web Terminal Endpoint : ${{ needs.cache-compose-file.outputs.expected-url }}
+        Meilisearch Endpoint : ${{ needs.cache-compose-file.outputs.expected-url }}/meilisearch
     permissions:
       contents: read
       pull-requests: write
14 .github/workflows/update-cargo-toml-version.yml vendored
@@ -1,4 +1,4 @@
-name: Update Meilisearch version in Cargo.toml
+name: Update Meilisearch version in all Cargo.toml files

 on:
   workflow_dispatch:
@@ -14,7 +14,7 @@ env:

 jobs:
   update-version-cargo-toml:
-    name: Update version in Cargo.toml
+    name: Update version in Cargo.toml files
     runs-on: ubuntu-latest
     steps:
       - uses: actions/checkout@v3
@@ -25,23 +25,23 @@ jobs:
           override: true
       - name: Install sd
         run: cargo install sd
-      - name: Update Cargo.toml file
+      - name: Update Cargo.toml files
         run: |
           raw_new_version=$(echo $NEW_VERSION | cut -d 'v' -f 2)
           new_string="version = \"$raw_new_version\""
-          sd '^version = "\d+.\d+.\w+"$' "$new_string" Cargo.toml
+          sd '^version = "\d+.\d+.\w+"$' "$new_string" */Cargo.toml
       - name: Build Meilisearch to update Cargo.lock
         run: cargo build
       - name: Commit and push the changes to the ${{ env.NEW_BRANCH }} branch
         uses: EndBug/add-and-commit@v9
         with:
-          message: "Update version for the next release (${{ env.NEW_VERSION }}) in Cargo.toml"
+          message: "Update version for the next release (${{ env.NEW_VERSION }}) in Cargo.toml files"
           new_branch: ${{ env.NEW_BRANCH }}
       - name: Create the PR pointing to ${{ github.ref_name }}
         run: |
           gh pr create \
-            --title "Update version for the next release ($NEW_VERSION) in Cargo.toml" \
-            --body '⚠️ This PR is automatically generated. Check the new version is the expected one and Cargo.lock has been updated before merging.' \
+            --title "Update version for the next release ($NEW_VERSION) in Cargo.toml files" \
+            --body '⚠️ This PR is automatically generated. Check the new version is the expected one before merging.' \
             --label 'skip changelog' \
            --milestone $NEW_VERSION \
            --base $GITHUB_REF_NAME
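Stripped of the workflow plumbing, the version-bump step comes down to a few shell lines; the `NEW_VERSION` value here is only an example (CI receives it as a workflow input):

```bash
cargo install sd
NEW_VERSION="v1.0.1"  # illustrative
raw_new_version=$(echo $NEW_VERSION | cut -d 'v' -f 2)
new_string="version = \"$raw_new_version\""
# Rewrite the version line of every sub-crate manifest in one pass.
sd '^version = "\d+.\d+.\w+"$' "$new_string" */Cargo.toml
cargo build  # refresh Cargo.lock with the new versions
```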
2 .gitignore vendored
@@ -1,5 +1,3 @@
-.idea/
-.vscode/
 /target
 **/*.csv
 **/*.json_lines
CONTRIBUTING.md
@@ -121,19 +121,15 @@ The full Meilisearch release process is described in [this guide](https://github
 Depending on the developed feature, you might need to provide a prototyped version of Meilisearch to make it easier to test by the users.

 The prototype name must follow this convention: `prototype-X-Y` where
-- `X` is the feature name formatted in `kebab-case`. It should not end with a single number.
+- `X` is the feature name formatted in `kebab-case`
 - `Y` is the version of the prototype, starting from `0`.

-✅ Example: `prototype-auto-resize-0`. </br>
-❌ Bad example: `auto-resize-0`: lacks the `prototype` prefix. </br>
-❌ Bad example: `prototype-auto-resize`: lacks the version suffix. </br>
-❌ Bad example: `prototype-auto-resize-0-0`: feature name ends with a single number.
+Example: `prototype-auto-resize-0`.

 Steps to create a prototype:

 1. In your terminal, go to the last commit of your branch (the one you want to provide as a prototype).
 2. Create a tag following the convention: `git tag prototype-X-Y`
-3. Run Meilisearch and check that its launch summary features a line: `Prototype: prototype-X-Y` (you may need to switch branches and back after tagging for this to work).
 3. Push the tag: `git push origin prototype-X-Y`
 4. Check the [Docker CI](https://github.com/meilisearch/meilisearch/actions/workflows/publish-docker-images.yml) is now running.

@@ -142,7 +138,7 @@ More information about [how to run Meilisearch with Docker](https://docs.meilise

 ⚙️ However, no binaries will be created. If the users do not use Docker, they can go to the `prototype-X-Y` tag in the Meilisearch repository and compile from the source code.

-⚠️ When sharing a prototype with users, remind them to not use it in production. Prototypes are solely for test purposes.
+⚠️ When sharing a prototype with users, prevent them from using it in production. Prototypes are only for test purposes.

 ### Release assets
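Put together, publishing a prototype is three commands; the feature name below is the example from the convention, and the branch name is hypothetical:

```bash
git checkout my-feature-branch           # hypothetical branch, at the commit to share
git tag prototype-auto-resize-0          # prototype-X-Y: kebab-case feature + version
git push origin prototype-auto-resize-0  # the tag push triggers the Docker CI
```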
258 Cargo.lock generated
@@ -36,9 +36,9 @@ dependencies = [

 [[package]]
 name = "actix-http"
-version = "3.3.0"
+version = "3.2.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0070905b2c4a98d184c4e81025253cb192aa8a73827553f38e9410801ceb35bb"
+checksum = "0c83abf9903e1f0ad9973cc4f7b9767fd5a03a583f51a5b7a339e07987cd2724"
 dependencies = [
  "actix-codec",
  "actix-rt",
@@ -46,7 +46,7 @@ dependencies = [
  "actix-tls",
  "actix-utils",
  "ahash",
- "base64 0.21.0",
+ "base64 0.13.1",
  "bitflags",
  "brotli",
  "bytes",
@@ -68,10 +68,7 @@ dependencies = [
  "rand",
  "sha1",
  "smallvec",
- "tokio",
- "tokio-util",
  "tracing",
- "zstd 0.12.3+zstd.1.5.2",
 ]

 [[package]]
@@ -167,9 +164,9 @@ dependencies = [

 [[package]]
 name = "actix-web"
-version = "4.3.0"
+version = "4.2.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "464e0fddc668ede5f26ec1f9557a8d44eda948732f40c6b0ad79126930eb775f"
+checksum = "d48f7b6534e06c7bfc72ee91db7917d4af6afe23e7d223b51e68fffbb21e96b9"
 dependencies = [
  "actix-codec",
  "actix-http",
@@ -410,7 +407,7 @@ checksum = "b645a089122eccb6111b4f81cbc1a49f5900ac4666bb93ac027feaecf15607bf"

 [[package]]
 name = "benchmarks"
-version = "1.1.0"
+version = "1.0.0"
 dependencies = [
  "anyhow",
  "bytes",
@@ -517,6 +514,12 @@ dependencies = [
  "serde",
 ]

+[[package]]
+name = "build_const"
+version = "0.2.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b4ae4235e6dac0694637c763029ecea1a2ec9e4e06ec2729bd21ba4d9c863eb7"
+
 [[package]]
 name = "bumpalo"
 version = "3.11.1"
@@ -603,9 +606,9 @@ dependencies = [

 [[package]]
 name = "cargo_toml"
-version = "0.14.1"
+version = "0.13.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2bfbc36312494041e2cdd5f06697b7e89d4b76f42773a0b5556ac290ff22acc2"
+checksum = "497049e9477329f8f6a559972ee42e117487d01d1e8c2cc9f836ea6fa23a9e1a"
 dependencies = [
  "serde",
  "toml",
@@ -653,19 +656,16 @@ dependencies = [

 [[package]]
 name = "charabia"
-version = "0.7.1"
+version = "0.7.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1ad3d9667a6b4e03813162c22c4d58235c2dc25d580d60837ce29199038341c9"
+checksum = "b57f9571f611796ea38e5a9c12e5ce37476f70397b032757f8dfe0c7b9bc5637"
 dependencies = [
  "cow-utils",
  "csv",
- "deunicode",
  "fst",
- "irg-kvariants",
  "jieba-rs",
  "lindera",
+ "lindera-ipadic",
+ "lindera-ko-dic",
  "once_cell",
  "pinyin",
  "serde",
@@ -718,9 +718,14 @@ version = "3.2.23"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "71655c45cb9845d3270c9d6df84ebe72b4dad3c2ba3f7023ad47c144e4e473a5"
 dependencies = [
+ "atty",
  "bitflags",
+ "clap_derive 3.2.18",
  "clap_lex 0.2.4",
  "indexmap",
+ "once_cell",
+ "strsim",
+ "termcolor",
  "textwrap",
 ]

@@ -731,7 +736,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "a7db700bc935f9e43e88d00b0850dae18a63773cfbec6d8e070fccf7fef89a39"
 dependencies = [
  "bitflags",
- "clap_derive",
+ "clap_derive 4.0.21",
  "clap_lex 0.3.0",
  "is-terminal",
  "once_cell",
@@ -739,6 +744,19 @@ dependencies = [
  "termcolor",
 ]

+[[package]]
+name = "clap_derive"
+version = "3.2.18"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ea0c8bce528c4be4da13ea6fead8965e95b6073585a2f05204bd8f4119f82a65"
+dependencies = [
+ "heck",
+ "proc-macro-error",
+ "proc-macro2",
+ "quote",
+ "syn",
+]
+
 [[package]]
 name = "clap_derive"
 version = "4.0.21"
@@ -852,6 +870,15 @@ dependencies = [
  "libc",
 ]

+[[package]]
+name = "crc"
+version = "1.8.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d663548de7f5cca343f1e0a48d14dcfb0e9eb4e079ec58883b7251539fa10aeb"
+dependencies = [
+ "build_const",
+]
+
 [[package]]
 name = "crc32fast"
 version = "1.3.2"
@@ -1083,26 +1110,20 @@ dependencies = [

 [[package]]
 name = "deserr"
-version = "0.5.0"
+version = "0.3.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c71c14985c842bf1e520b1ebcd22daff6aeece32f510e11f063cecf9b308c04b"
+checksum = "28380303ca15ec07e1d5b079baf19cf849b09edad5cab219c1c51b2bd07523de"
 dependencies = [
- "actix-http",
- "actix-utils",
- "actix-web",
  "deserr-internal",
- "futures",
- "serde-cs",
  "serde_json",
- "serde_urlencoded",
  "strsim",
 ]

 [[package]]
 name = "deserr-internal"
-version = "0.5.0"
+version = "0.3.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "cae1c51b191528c9e4e5d6cff671de94f61fcda1c206cc891251e0cf438c941a"
+checksum = "860928cd8af78d223a3d70dd581f21d7c3de8aa2eecd938e0c0a399ded7c1451"
 dependencies = [
  "convert_case 0.5.0",
  "proc-macro2",
@@ -1150,7 +1171,7 @@ dependencies = [

 [[package]]
 name = "dump"
-version = "1.1.0"
+version = "1.0.0"
 dependencies = [
  "anyhow",
  "big_s",
@@ -1303,19 +1324,6 @@ dependencies = [
  "termcolor",
 ]

-[[package]]
-name = "env_logger"
-version = "0.10.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "85cdab6a89accf66733ad5a1693a4dcced6aeff64602b634530dd73c1f3ee9f0"
-dependencies = [
- "humantime",
- "is-terminal",
- "log",
- "regex",
- "termcolor",
-]
-
 [[package]]
 name = "errno"
 version = "0.2.8"
@@ -1371,7 +1379,7 @@ dependencies = [

 [[package]]
 name = "file-store"
-version = "1.1.0"
+version = "1.0.0"
 dependencies = [
  "faux",
  "tempfile",
@@ -1393,7 +1401,7 @@ dependencies = [

 [[package]]
 name = "filter-parser"
-version = "1.1.0"
+version = "1.0.0"
 dependencies = [
  "insta",
  "nom",
@@ -1413,7 +1421,7 @@ dependencies = [

 [[package]]
 name = "flatten-serde-json"
-version = "1.1.0"
+version = "1.0.0"
 dependencies = [
  "criterion",
  "serde_json",
@@ -1890,7 +1898,7 @@ dependencies = [

 [[package]]
 name = "index-scheduler"
-version = "1.1.0"
+version = "1.0.0"
 dependencies = [
  "anyhow",
  "big_s",
@@ -1904,7 +1912,6 @@ dependencies = [
  "insta",
  "log",
  "meili-snap",
- "meilisearch-auth",
  "meilisearch-types",
  "nelson",
  "page_size 0.5.0",
@@ -1970,17 +1977,6 @@ version = "2.7.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "30e22bd8629359895450b59ea7a776c850561b96a3b1d31321c1949d9e6c9146"

-[[package]]
-name = "irg-kvariants"
-version = "0.1.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c73214298363629cf9dbfc93b426808865ee3c121029778cb31b1284104fdf78"
-dependencies = [
- "csv",
- "once_cell",
- "serde",
-]
-
 [[package]]
 name = "is-terminal"
 version = "0.4.2"
@@ -2049,7 +2045,7 @@ dependencies = [

 [[package]]
 name = "json-depth-checker"
-version = "1.1.0"
+version = "1.0.0"
 dependencies = [
  "criterion",
  "serde_json",
@@ -2069,15 +2065,6 @@ dependencies = [
  "simple_asn1",
 ]

-[[package]]
-name = "kanaria"
-version = "0.2.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c0f9d9652540055ac4fded998a73aca97d965899077ab1212587437da44196ff"
-dependencies = [
- "bitflags",
-]
-
 [[package]]
 name = "language-tags"
 version = "0.3.2"
@@ -2147,15 +2134,14 @@ dependencies = [

 [[package]]
 name = "lindera"
-version = "0.21.1"
+version = "0.17.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0f33a20bb9cbf95572b2d2f40d7040c8d8c7ad09ae20e1f6513db6ef2564dfc5"
+checksum = "082ca91ac4d1557028ace9bfb8cee1500d156a4574dda93cfcdcf4caaebb9bd7"
 dependencies = [
  "anyhow",
  "bincode",
  "byteorder",
  "encoding",
- "kanaria",
  "lindera-cc-cedict-builder",
  "lindera-core",
  "lindera-dictionary",
@@ -2164,27 +2150,24 @@ dependencies = [
  "lindera-ko-dic",
  "lindera-ko-dic-builder",
  "lindera-unidic-builder",
- "regex",
  "serde",
  "serde_json",
  "thiserror",
- "unicode-blocks",
- "unicode-normalization",
  "yada",
 ]

 [[package]]
 name = "lindera-cc-cedict-builder"
-version = "0.21.0"
+version = "0.17.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "60c3b379251edadbac7a5fdb31e482274e11dae6ab6cc789d0d86cf34369cf49"
+checksum = "a8967615a6d85320ec2755e1435c36165467ba01a79026adc3f86dad1b668df3"
 dependencies = [
  "anyhow",
  "bincode",
  "byteorder",
  "clap 3.2.23",
  "csv",
  "encoding",
- "env_logger 0.10.0",
+ "env_logger",
  "glob",
  "lindera-core",
  "lindera-decompress",
@@ -2192,29 +2175,17 @@ dependencies = [
  "yada",
 ]

-[[package]]
-name = "lindera-compress"
-version = "0.21.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a8d0ea3de5625e2381cac94e518d3b56103fde56bc0dce840fe875c1e871b125"
-dependencies = [
- "anyhow",
- "flate2",
- "lindera-decompress",
-]
-
 [[package]]
 name = "lindera-core"
-version = "0.21.0"
+version = "0.17.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2281747b98fdd46bcc54ce7fdb6870dad9f67ddb3dc086c47b6704f3e1178cd5"
+checksum = "0e8ed3cea13f73557a4574a179b1518670a3b70bfdad120521313b03cc89380e"
 dependencies = [
  "anyhow",
  "bincode",
  "byteorder",
  "encoding_rs",
  "log",
  "once_cell",
  "serde",
  "thiserror",
  "yada",
@@ -2222,20 +2193,20 @@ dependencies = [

 [[package]]
 name = "lindera-decompress"
-version = "0.21.0"
+version = "0.17.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "52101bd454754c506305ab897af5ac2ae41fe91e3272c1ff5c6a02a089dfaefd"
+checksum = "2badb41828f89cfa6452db0a66da77897c0a04478304de26c8b2b36613e08d43"
 dependencies = [
  "anyhow",
- "flate2",
+ "lzma-rs",
  "serde",
 ]

 [[package]]
 name = "lindera-dictionary"
-version = "0.21.0"
+version = "0.17.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "af1c6668848f1d30d216c99093a3ed3fe125c105fa12a4aeed5a1861dc01dd52"
+checksum = "e219722c9f56b920c231210e7c25d8b5d35b508e7a2fd69d368916c4b1c926f6"
 dependencies = [
  "anyhow",
  "bincode",
@@ -2245,16 +2216,15 @@ dependencies = [

 [[package]]
 name = "lindera-ipadic"
-version = "0.21.0"
+version = "0.17.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "693098007200fa43fd5cdc9ca8740f371327369672ce812cd87a1f6344971e31"
+checksum = "2c8e87c8362c724e8188fb7d9b6d184cac15d01369295e9bff7812b630d57e3b"
 dependencies = [
  "bincode",
  "byteorder",
  "encoding",
- "flate2",
  "lindera-core",
  "lindera-decompress",
  "lindera-ipadic-builder",
  "once_cell",
  "tar",
@@ -2262,19 +2232,19 @@ dependencies = [

 [[package]]
 name = "lindera-ipadic-builder"
-version = "0.21.0"
+version = "0.17.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7b6b7240d097a8fc37ee8f90ebff02c4db0ba5325ecb0dacb6da3724596798c9"
+checksum = "1439e95852e444a116424086dc64d709c90e8af269ff7d2c2c4020f666f8dfab"
 dependencies = [
  "anyhow",
  "bincode",
  "byteorder",
  "clap 3.2.23",
  "csv",
  "encoding_rs",
  "encoding_rs_io",
- "env_logger 0.10.0",
+ "env_logger",
  "glob",
- "lindera-compress",
  "lindera-core",
  "lindera-decompress",
  "log",
@@ -2284,16 +2254,15 @@ dependencies = [

 [[package]]
 name = "lindera-ko-dic"
-version = "0.21.0"
+version = "0.17.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "abd3c5a4addeb61ca66788a3dd1fd51093e6cd8fea1d997042ada5aa60e8cc5e"
+checksum = "cb15f949220da45872d774b7831bb030855ec083435c907499782f8558c8a203"
 dependencies = [
  "bincode",
  "byteorder",
  "encoding",
- "flate2",
  "lindera-core",
  "lindera-decompress",
  "lindera-ko-dic-builder",
  "once_cell",
  "tar",
@@ -2301,18 +2270,18 @@ dependencies = [

 [[package]]
 name = "lindera-ko-dic-builder"
-version = "0.21.0"
+version = "0.17.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "512bb1393a9281e0b13704319d1343b7931416865852d9d7b7c0178431518326"
+checksum = "fde5a7352f4754be4f741e90bf4dff38a12a6572ab3880d0cf688e1166b8d82b"
 dependencies = [
  "anyhow",
  "bincode",
  "byteorder",
  "clap 3.2.23",
  "csv",
  "encoding",
- "env_logger 0.10.0",
+ "env_logger",
  "glob",
- "lindera-compress",
  "lindera-core",
  "lindera-decompress",
  "log",
@@ -2321,16 +2290,17 @@ dependencies = [

 [[package]]
 name = "lindera-unidic-builder"
-version = "0.21.0"
+version = "0.17.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7f575a27f8ba67c15fe16ebf7d277a0ac04e8c8a0f72670ebc2443da9d41c450"
+checksum = "f1451b2ed8a7184a5f815d84f99d358c1d67297305831453dfdc0eb5d08e22b5"
 dependencies = [
  "anyhow",
  "bincode",
  "byteorder",
  "clap 3.2.23",
  "csv",
  "encoding",
- "env_logger 0.10.0",
+ "env_logger",
  "glob",
  "lindera-core",
  "lindera-decompress",
@@ -2419,6 +2389,16 @@ dependencies = [
  "syn",
 ]

+[[package]]
+name = "lzma-rs"
+version = "0.2.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "aba8ecb0450dfabce4ad72085eed0a75dffe8f21f7ada05638564ea9db2d7fb1"
+dependencies = [
+ "byteorder",
+ "crc",
+]
+
 [[package]]
 name = "manifest-dir-macros"
 version = "0.1.16"
@@ -2445,7 +2425,7 @@ checksum = "490cc448043f947bae3cbee9c203358d62dbee0db12107a74be5c30ccfd09771"

 [[package]]
 name = "meili-snap"
-version = "1.1.0"
+version = "1.0.0"
 dependencies = [
  "insta",
  "md5",
@@ -2454,7 +2434,7 @@ dependencies = [

 [[package]]
 name = "meilisearch"
-version = "1.1.0"
+version = "1.0.0"
 dependencies = [
  "actix-cors",
  "actix-http",
@@ -2477,7 +2457,7 @@ dependencies = [
  "deserr",
  "dump",
  "either",
- "env_logger 0.9.3",
+ "env_logger",
  "file-store",
  "flate2",
  "fst",
@@ -2542,12 +2522,11 @@ dependencies = [

 [[package]]
 name = "meilisearch-auth"
-version = "1.1.0"
+version = "1.0.0"
 dependencies = [
  "base64 0.13.1",
  "enum-iterator",
  "hmac",
- "maplit",
  "meilisearch-types",
  "rand",
  "roaring",
@@ -2561,7 +2540,7 @@ dependencies = [

 [[package]]
 name = "meilisearch-types"
-version = "1.1.0"
+version = "1.0.0"
 dependencies = [
  "actix-web",
  "anyhow",
@@ -2615,7 +2594,7 @@ dependencies = [

 [[package]]
 name = "milli"
-version = "1.1.0"
+version = "1.0.0"
 dependencies = [
  "big_s",
  "bimap",
@@ -2969,7 +2948,7 @@ checksum = "478c572c3d73181ff3c2539045f6eb99e5491218eae919370993b890cdbdd98e"

 [[package]]
 name = "permissive-json-pointer"
-version = "1.1.0"
+version = "1.0.0"
 dependencies = [
  "big_s",
  "serde_json",
@@ -3888,9 +3867,9 @@ checksum = "cda74da7e1a664f795bb1f8a87ec406fb89a02522cf6e50620d016add6dbbf5c"

 [[package]]
 name = "tokio"
-version = "1.24.2"
+version = "1.24.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "597a12a59981d9e3c38d216785b0c37399f6e415e8d0712047620f189371b0bb"
+checksum = "1d9f76183f91ecfb55e1d7d5602bd1d979e38a3a522fe900241cf195624d67ae"
 dependencies = [
  "autocfg",
  "bytes",
@@ -4022,12 +4001,6 @@ version = "0.3.8"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "099b7128301d285f79ddd55b9a83d5e6b9e97c92e0ea0daebee7263e932de992"

-[[package]]
-name = "unicode-blocks"
-version = "0.1.5"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9de2be6bad6f56ce8373d377e611cbb2265de3a656138065609ce82e217aad70"
-
 [[package]]
 name = "unicode-ident"
 version = "1.0.6"
@@ -4449,7 +4422,7 @@ dependencies = [
  "pbkdf2",
  "sha1",
  "time",
- "zstd 0.11.2+zstd.1.5.2",
+ "zstd",
 ]

 [[package]]
@@ -4458,16 +4431,7 @@ version = "0.11.2+zstd.1.5.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "20cc960326ece64f010d2d2107537f26dc589a6573a316bd5b1dba685fa5fde4"
 dependencies = [
- "zstd-safe 5.0.2+zstd.1.5.2",
-]
-
-[[package]]
-name = "zstd"
-version = "0.12.3+zstd.1.5.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "76eea132fb024e0e13fd9c2f5d5d595d8a967aa72382ac2f9d39fcc95afd0806"
-dependencies = [
- "zstd-safe 6.0.4+zstd.1.5.4",
+ "zstd-safe",
 ]

 [[package]]
@@ -4480,21 +4444,11 @@ dependencies = [
  "zstd-sys",
 ]

-[[package]]
-name = "zstd-safe"
-version = "6.0.4+zstd.1.5.4"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7afb4b54b8910cf5447638cb54bf4e8a65cbedd783af98b98c62ffe91f185543"
-dependencies = [
- "libc",
- "zstd-sys",
-]
-
 [[package]]
 name = "zstd-sys"
-version = "2.0.7+zstd.1.5.4"
+version = "2.0.5+zstd.1.5.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "94509c3ba2fe55294d752b79842c530ccfab760192521df74a081a78d2b3c7f5"
+checksum = "edc50ffce891ad571e9f9afe5039c4837bede781ac4bb13052ed7ae695518596"
 dependencies = [
  "cc",
  "libc",
Cargo.toml
@@ -16,15 +16,6 @@ members = [
     "benchmarks"
 ]

-[workspace.package]
-version = "1.1.0"
-authors = ["Quentin de Quelen <quentin@dequelen.me>", "Clément Renault <clement@meilisearch.com>"]
-description = "Meilisearch HTTP server"
-homepage = "https://meilisearch.com"
-readme = "README.md"
-edition = "2021"
-license = "MIT"

 [profile.release]
 codegen-units = 1
Dockerfile
@@ -7,8 +7,7 @@ WORKDIR /meilisearch

 ARG COMMIT_SHA
 ARG COMMIT_DATE
-ARG GIT_TAG
-ENV VERGEN_GIT_SHA=${COMMIT_SHA} VERGEN_GIT_COMMIT_TIMESTAMP=${COMMIT_DATE} VERGEN_GIT_SEMVER_LIGHTWEIGHT=${GIT_TAG}
+ENV VERGEN_GIT_SHA=${COMMIT_SHA} VERGEN_GIT_COMMIT_TIMESTAMP=${COMMIT_DATE}
 ENV RUSTFLAGS="-C target-feature=-crt-static"

 COPY . .
benchmarks/Cargo.toml
@@ -1,15 +1,9 @@
 [package]
 name = "benchmarks"
+version = "1.0.0"
+edition = "2018"
 publish = false

-version.workspace = true
-authors.workspace = true
-description.workspace = true
-homepage.workspace = true
-readme.workspace = true
-edition.workspace = true
-license.workspace = true

 [dependencies]
 anyhow = "1.0.65"
 csv = "1.1.6"
benchmarks/benches/formatting.rs
@@ -29,7 +29,7 @@ fn bench_formatting(c: &mut criterion::Criterion) {
                     (vec![Rc::new(MatchingWord::new("thedoord".to_string(), 1, true).unwrap())], vec![0, 1, 2]),
                     (vec![Rc::new(MatchingWord::new("doord".to_string(), 1, true).unwrap())], vec![1, 2]),
                 ]
-            ).unwrap(), TokenizerBuilder::default().build()),
+            ), TokenizerBuilder::default().build()),
             },
         ];
10 config.toml
@@ -118,13 +118,3 @@ ssl_resumption = false
 ssl_tickets = false
 # Activates SSL tickets.
 # https://docs.meilisearch.com/learn/configuration/instance_options.html#ssl-tickets
-
-#############################
-### Experimental features ###
-#############################
-
-experimental_enable_metrics = false
-# Experimental metrics feature. For more information, see: <https://github.com/meilisearch/meilisearch/discussions/3518>
-# Enables the Prometheus metrics on the `GET /metrics` endpoint.
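For context on the removed block: with the experimental flag enabled, Prometheus metrics are served on the instance's own port. A quick check, assuming a local instance and the usual `MEILI_*` environment-variable mapping of config options (the variable name here is that assumed mapping, not quoted from the file):

```bash
# Start a local instance with the experimental metrics flag, then scrape it.
MEILI_EXPERIMENTAL_ENABLE_METRICS=true ./meilisearch &
curl http://localhost:7700/metrics   # Prometheus-format metrics
```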
dump/Cargo.toml
@@ -1,14 +1,7 @@
 [package]
 name = "dump"
-publish = false
-
-version.workspace = true
-authors.workspace = true
-description.workspace = true
-edition.workspace = true
-homepage.workspace = true
-readme.workspace = true
-license.workspace = true
+version = "1.0.0"
+edition = "2021"

 [dependencies]
 anyhow = "1.0.65"
dump/src/lib.rs
@@ -203,11 +203,12 @@ pub(crate) mod test {

     use big_s::S;
     use maplit::btreeset;
-    use meilisearch_types::index_uid_pattern::IndexUidPattern;
+    use meilisearch_types::index_uid::IndexUid;
     use meilisearch_types::keys::{Action, Key};
     use meilisearch_types::milli::update::Setting;
     use meilisearch_types::milli::{self};
     use meilisearch_types::settings::{Checked, Settings};
+    use meilisearch_types::star_or::StarOr;
     use meilisearch_types::tasks::{Details, Status};
     use serde_json::{json, Map, Value};
     use time::macros::datetime;
@@ -340,7 +341,7 @@ pub(crate) mod test {
             name: Some(S("doggos_key")),
             uid: Uuid::from_str("9f8a34da-b6b2-42f0-939b-dbd4c3448655").unwrap(),
             actions: vec![Action::DocumentsAll],
-            indexes: vec![IndexUidPattern::from_str("doggos").unwrap()],
+            indexes: vec![StarOr::Other(IndexUid::from_str("doggos").unwrap())],
             expires_at: Some(datetime!(4130-03-14 12:21 UTC)),
             created_at: datetime!(1960-11-15 0:00 UTC),
             updated_at: datetime!(2022-11-10 0:00 UTC),
@@ -350,7 +351,7 @@ pub(crate) mod test {
             name: Some(S("master_key")),
             uid: Uuid::from_str("4622f717-1c00-47bb-a494-39d76a49b591").unwrap(),
             actions: vec![Action::All],
-            indexes: vec![IndexUidPattern::all()],
+            indexes: vec![StarOr::Star],
             expires_at: None,
             created_at: datetime!(0000-01-01 00:01 UTC),
             updated_at: datetime!(1964-05-04 17:25 UTC),
dump/src/reader/compat/v5_to_v6.rs
@@ -181,8 +181,10 @@ impl CompatV5ToV6 {
                     .indexes
                     .into_iter()
                     .map(|index| match index {
-                        v5::StarOr::Star => v6::IndexUidPattern::all(),
-                        v5::StarOr::Other(uid) => v6::IndexUidPattern::new_unchecked(uid.as_str()),
+                        v5::StarOr::Star => v6::StarOr::Star,
+                        v5::StarOr::Other(uid) => {
+                            v6::StarOr::Other(v6::IndexUid::new_unchecked(uid.as_str()))
+                        }
                     })
                     .collect(),
                 expires_at: key.expires_at,
dump/src/reader/v5/mod.rs
@@ -34,7 +34,8 @@ pub type PaginationSettings = meilisearch_types::settings::PaginationSettings;

 // everything related to the api keys
 pub type Action = meilisearch_types::keys::Action;
-pub type IndexUidPattern = meilisearch_types::index_uid_pattern::IndexUidPattern;
+pub type StarOr<T> = meilisearch_types::star_or::StarOr<T>;
+pub type IndexUid = meilisearch_types::index_uid::IndexUid;

 // everything related to the errors
 pub type ResponseError = meilisearch_types::error::ResponseError;
file-store/Cargo.toml
@@ -1,14 +1,7 @@
 [package]
 name = "file-store"
-publish = false
-
-version.workspace = true
-authors.workspace = true
-description.workspace = true
-homepage.workspace = true
-readme.workspace = true
-edition.workspace = true
-license.workspace = true
+version = "1.0.0"
+edition = "2021"

 [dependencies]
 tempfile = "3.3.0"
file-store/src/lib.rs
@@ -116,20 +116,10 @@ impl FileStore {

     /// List the Uuids of the files in the FileStore
     pub fn all_uuids(&self) -> Result<impl Iterator<Item = Result<Uuid>>> {
-        Ok(self.path.read_dir()?.filter_map(|entry| {
-            let file_name = match entry {
-                Ok(entry) => entry.file_name(),
-                Err(e) => return Some(Err(e.into())),
-            };
-            let file_name = match file_name.to_str() {
-                Some(file_name) => file_name,
-                None => return Some(Err(Error::CouldNotParseFileNameAsUtf8)),
-            };
-            if file_name.starts_with('.') {
-                None
-            } else {
-                Some(Uuid::from_str(file_name).map_err(|e| e.into()))
-            }
+        Ok(self.path.read_dir()?.map(|entry| {
+            Ok(Uuid::from_str(
+                entry?.file_name().to_str().ok_or(Error::CouldNotParseFileNameAsUtf8)?,
+            )?)
         }))
     }
 }
@@ -145,34 +135,3 @@ impl File {
         Ok(())
     }
 }
-
-#[cfg(test)]
-mod test {
-    use std::io::Write;
-
-    use tempfile::TempDir;
-
-    use super::*;
-
-    #[test]
-    fn all_uuids() {
-        let dir = TempDir::new().unwrap();
-        let fs = FileStore::new(dir.path()).unwrap();
-        let (uuid, mut file) = fs.new_update().unwrap();
-        file.write_all(b"Hello world").unwrap();
-        file.persist().unwrap();
-        let all_uuids = fs.all_uuids().unwrap().collect::<Result<Vec<_>>>().unwrap();
-        assert_eq!(all_uuids, vec![uuid]);
-
-        let (uuid2, file) = fs.new_update().unwrap();
-        let all_uuids = fs.all_uuids().unwrap().collect::<Result<Vec<_>>>().unwrap();
-        assert_eq!(all_uuids, vec![uuid]);
-
-        file.persist().unwrap();
-        let mut all_uuids = fs.all_uuids().unwrap().collect::<Result<Vec<_>>>().unwrap();
-        all_uuids.sort();
-        let mut expected = vec![uuid, uuid2];
-        expected.sort();
-        assert_eq!(all_uuids, expected);
-    }
-}
filter-parser/Cargo.toml
@@ -1,16 +1,10 @@
 [package]
 name = "filter-parser"
+version = "1.0.0"
+edition = "2021"
+description = "The parser for the Meilisearch filter syntax"
 publish = false

-version.workspace = true
-authors.workspace = true
-# description.workspace = true
-homepage.workspace = true
-readme.workspace = true
-edition.workspace = true
-license.workspace = true

 [dependencies]
 nom = "7.1.1"
 nom_locate = "4.0.0"
filter-parser/src/lib.rs
@@ -141,7 +141,7 @@ pub enum FilterCondition<'a> {
     Or(Vec<Self>),
     And(Vec<Self>),
     GeoLowerThan { point: [Token<'a>; 2], radius: Token<'a> },
-    GeoBoundingBox { top_right_point: [Token<'a>; 2], bottom_left_point: [Token<'a>; 2] },
+    GeoBoundingBox { top_left_point: [Token<'a>; 2], bottom_right_point: [Token<'a>; 2] },
 }

 impl<'a> FilterCondition<'a> {
@@ -362,8 +362,8 @@ fn parse_geo_bounding_box(input: Span) -> IResult<FilterCondition> {
     }

     let res = FilterCondition::GeoBoundingBox {
-        top_right_point: [args[0][0].into(), args[0][1].into()],
-        bottom_left_point: [args[1][0].into(), args[1][1].into()],
+        top_left_point: [args[0][0].into(), args[0][1].into()],
+        bottom_right_point: [args[1][0].into(), args[1][1].into()],
     };
     Ok((input, res))
 }
@@ -780,10 +780,7 @@ impl<'a> std::fmt::Display for FilterCondition<'a> {
             FilterCondition::GeoLowerThan { point, radius } => {
                 write!(f, "_geoRadius({}, {}, {})", point[0], point[1], radius)
             }
-            FilterCondition::GeoBoundingBox {
-                top_right_point: top_left_point,
-                bottom_left_point: bottom_right_point,
-            } => {
+            FilterCondition::GeoBoundingBox { top_left_point, bottom_right_point } => {
                 write!(
                     f,
                     "_geoBoundingBox([{}, {}], [{}, {}])",
flatten-serde-json/Cargo.toml
@@ -1,17 +1,11 @@
 [package]
 name = "flatten-serde-json"
+version = "1.0.0"
+edition = "2021"
+description = "Flatten serde-json objects like elastic search"
+readme = "README.md"
 publish = false

-version.workspace = true
-authors.workspace = true
-# description.workspace = true
-homepage.workspace = true
-# readme.workspace = true
-edition.workspace = true
-license.workspace = true

 [dependencies]
 serde_json = "1.0"
index-scheduler/Cargo.toml
@@ -1,14 +1,7 @@
 [package]
 name = "index-scheduler"
-publish = false
-
-version.workspace = true
-authors.workspace = true
-description.workspace = true
-homepage.workspace = true
-readme.workspace = true
-edition.workspace = true
-license.workspace = true
+version = "1.0.0"
+edition = "2021"

 [dependencies]
 anyhow = "1.0.64"
@@ -19,7 +12,6 @@ dump = { path = "../dump" }
 enum-iterator = "1.1.3"
 file-store = { path = "../file-store" }
 log = "0.4.14"
-meilisearch-auth = { path = "../meilisearch-auth" }
 meilisearch-types = { path = "../meilisearch-types" }
 page_size = "0.5.0"
 roaring = { version = "0.10.0", features = ["serde"] }
@@ -88,11 +88,11 @@ pub enum BatchKind {
|
||||
DocumentClear {
|
||||
ids: Vec<TaskId>,
|
||||
},
|
||||
DocumentOperation {
|
||||
DocumentImport {
|
||||
method: IndexDocumentsMethod,
|
||||
allow_index_creation: bool,
|
||||
primary_key: Option<String>,
|
||||
operation_ids: Vec<TaskId>,
|
||||
import_ids: Vec<TaskId>,
|
||||
},
|
||||
DocumentDeletion {
|
||||
deletion_ids: Vec<TaskId>,
|
||||
@@ -102,12 +102,12 @@ pub enum BatchKind {
|
||||
allow_index_creation: bool,
|
||||
settings_ids: Vec<TaskId>,
|
||||
},
|
||||
SettingsAndDocumentOperation {
|
||||
SettingsAndDocumentImport {
|
||||
settings_ids: Vec<TaskId>,
|
||||
method: IndexDocumentsMethod,
|
||||
allow_index_creation: bool,
|
||||
primary_key: Option<String>,
|
||||
operation_ids: Vec<TaskId>,
|
||||
import_ids: Vec<TaskId>,
|
||||
},
|
||||
Settings {
|
||||
allow_index_creation: bool,
|
||||
@@ -131,9 +131,9 @@ impl BatchKind {
|
||||
#[rustfmt::skip]
|
||||
fn allow_index_creation(&self) -> Option<bool> {
|
||||
match self {
|
||||
BatchKind::DocumentOperation { allow_index_creation, .. }
|
||||
BatchKind::DocumentImport { allow_index_creation, .. }
|
||||
| BatchKind::ClearAndSettings { allow_index_creation, .. }
|
||||
| BatchKind::SettingsAndDocumentOperation { allow_index_creation, .. }
|
||||
| BatchKind::SettingsAndDocumentImport { allow_index_creation, .. }
|
||||
| BatchKind::Settings { allow_index_creation, .. } => Some(*allow_index_creation),
|
||||
_ => None,
|
||||
}
|
||||
@@ -141,8 +141,8 @@ impl BatchKind {
|
||||
|
||||
fn primary_key(&self) -> Option<Option<&str>> {
|
||||
match self {
|
||||
BatchKind::DocumentOperation { primary_key, .. }
|
||||
| BatchKind::SettingsAndDocumentOperation { primary_key, .. } => {
|
||||
BatchKind::DocumentImport { primary_key, .. }
|
||||
| BatchKind::SettingsAndDocumentImport { primary_key, .. } => {
|
||||
Some(primary_key.as_deref())
|
||||
}
|
||||
_ => None,
|
||||
@@ -173,22 +173,22 @@ impl BatchKind {
|
||||
if primary_key.is_none() || pk.is_none() || primary_key == pk.as_deref() =>
|
||||
{
|
||||
(
|
||||
Continue(BatchKind::DocumentOperation {
|
||||
Continue(BatchKind::DocumentImport {
|
||||
method,
|
||||
allow_index_creation,
|
||||
primary_key: pk,
|
||||
operation_ids: vec![task_id],
|
||||
import_ids: vec![task_id],
|
||||
}),
|
||||
allow_index_creation,
|
||||
)
|
||||
}
|
||||
// if the primary key set in the task was different than ours we should stop and make this batch fail asap.
|
||||
K::DocumentImport { method, allow_index_creation, primary_key } => (
|
||||
Break(BatchKind::DocumentOperation {
|
||||
Break(BatchKind::DocumentImport {
|
||||
method,
|
||||
allow_index_creation,
|
||||
primary_key,
|
||||
operation_ids: vec![task_id],
|
||||
import_ids: vec![task_id],
|
||||
}),
|
||||
allow_index_creation,
|
||||
),
|
||||
@@ -249,7 +249,7 @@ impl BatchKind {
|
||||
(
|
||||
BatchKind::DocumentClear { mut ids }
|
||||
| BatchKind::DocumentDeletion { deletion_ids: mut ids }
|
||||
| BatchKind::DocumentOperation { method: _, allow_index_creation: _, primary_key: _, operation_ids: mut ids }
|
||||
| BatchKind::DocumentImport { method: _, allow_index_creation: _, primary_key: _, import_ids: mut ids }
|
||||
| BatchKind::Settings { allow_index_creation: _, settings_ids: mut ids },
|
||||
K::IndexDeletion,
|
||||
) => {
|
||||
@@ -258,7 +258,7 @@ impl BatchKind {
|
||||
}
|
||||
(
|
||||
BatchKind::ClearAndSettings { settings_ids: mut ids, allow_index_creation: _, mut other }
|
||||
| BatchKind::SettingsAndDocumentOperation { operation_ids: mut ids, method: _, allow_index_creation: _, primary_key: _, settings_ids: mut other },
|
||||
| BatchKind::SettingsAndDocumentImport { import_ids: mut ids, method: _, allow_index_creation: _, primary_key: _, settings_ids: mut other },
|
||||
K::IndexDeletion,
|
||||
) => {
|
||||
ids.push(id);
|
||||
@@ -278,71 +278,63 @@ impl BatchKind {
|
||||
K::DocumentImport { .. } | K::Settings { .. },
|
||||
) => Break(this),
|
||||
(
|
||||
BatchKind::DocumentOperation { method: _, allow_index_creation: _, primary_key: _, mut operation_ids },
|
||||
BatchKind::DocumentImport { method: _, allow_index_creation: _, primary_key: _, import_ids: mut ids },
|
||||
K::DocumentClear,
|
||||
) => {
|
||||
operation_ids.push(id);
|
||||
Continue(BatchKind::DocumentClear { ids: operation_ids })
|
||||
ids.push(id);
|
||||
Continue(BatchKind::DocumentClear { ids })
|
||||
}
|
||||
|
||||
// we can autobatch the same kind of document additions / updates
|
||||
(
|
||||
BatchKind::DocumentOperation { method: ReplaceDocuments, allow_index_creation, primary_key: _, mut operation_ids },
|
||||
BatchKind::DocumentImport { method: ReplaceDocuments, allow_index_creation, primary_key: _, mut import_ids },
|
||||
K::DocumentImport { method: ReplaceDocuments, primary_key: pk, .. },
|
||||
) => {
|
||||
operation_ids.push(id);
|
||||
Continue(BatchKind::DocumentOperation {
|
||||
import_ids.push(id);
|
||||
Continue(BatchKind::DocumentImport {
|
||||
method: ReplaceDocuments,
|
||||
allow_index_creation,
|
||||
operation_ids,
|
||||
import_ids,
|
||||
primary_key: pk,
|
||||
})
|
||||
}
|
||||
(
|
||||
BatchKind::DocumentOperation { method: UpdateDocuments, allow_index_creation, primary_key: _, mut operation_ids },
|
||||
BatchKind::DocumentImport { method: UpdateDocuments, allow_index_creation, primary_key: _, mut import_ids },
|
||||
K::DocumentImport { method: UpdateDocuments, primary_key: pk, .. },
|
||||
) => {
|
||||
operation_ids.push(id);
|
||||
Continue(BatchKind::DocumentOperation {
|
||||
|
||||
import_ids.push(id);
|
||||
Continue(BatchKind::DocumentImport {
|
||||
method: UpdateDocuments,
|
||||
allow_index_creation,
|
||||
primary_key: pk,
|
||||
operation_ids,
|
||||
import_ids,
|
||||
})
|
||||
}
|
||||
(
|
||||
this @ BatchKind::DocumentOperation { .. },
|
||||
K::DocumentDeletion,
|
||||
) => Break(this),
|
||||
|
||||
// but we can't autobatch documents if it's not the same kind
|
||||
// this match branch MUST be AFTER the previous one
|
||||
(
|
||||
this @ BatchKind::DocumentOperation { .. },
|
||||
K::DocumentImport { .. },
|
||||
this @ BatchKind::DocumentImport { .. },
|
||||
K::DocumentDeletion | K::DocumentImport { .. },
|
||||
) => Break(this),
|
||||
|
||||
(
|
||||
BatchKind::DocumentOperation { method, allow_index_creation, primary_key, operation_ids },
|
||||
BatchKind::DocumentImport { method, allow_index_creation, primary_key, import_ids },
|
||||
K::Settings { .. },
|
||||
) => Continue(BatchKind::SettingsAndDocumentOperation {
|
||||
) => Continue(BatchKind::SettingsAndDocumentImport {
|
||||
settings_ids: vec![id],
|
||||
method,
|
||||
allow_index_creation,
|
||||
primary_key,
|
||||
operation_ids,
|
||||
import_ids,
|
||||
}),
|
||||
|
||||
(BatchKind::DocumentDeletion { mut deletion_ids }, K::DocumentClear) => {
|
||||
deletion_ids.push(id);
|
||||
Continue(BatchKind::DocumentClear { ids: deletion_ids })
|
||||
}
|
||||
// we can't autobatch a deletion and an import
|
||||
(
|
||||
this @ BatchKind::DocumentDeletion { .. },
|
||||
K::DocumentImport { .. }
|
||||
) => {
|
||||
Break(this)
|
||||
}
|
||||
(this @ BatchKind::DocumentDeletion { .. }, K::DocumentImport { .. }) => Break(this),
|
||||
(BatchKind::DocumentDeletion { mut deletion_ids }, K::DocumentDeletion) => {
|
||||
deletion_ids.push(id);
|
||||
Continue(BatchKind::DocumentDeletion { deletion_ids })
|
||||
@@ -411,60 +403,60 @@ impl BatchKind {
|
||||
})
|
||||
}
|
||||
(
|
||||
BatchKind::SettingsAndDocumentOperation { settings_ids, method: _, mut operation_ids, allow_index_creation, primary_key: _ },
|
||||
BatchKind::SettingsAndDocumentImport { settings_ids, method: _, import_ids: mut other, allow_index_creation, primary_key: _ },
|
||||
K::DocumentClear,
|
||||
) => {
|
||||
operation_ids.push(id);
|
||||
other.push(id);
|
||||
Continue(BatchKind::ClearAndSettings {
|
||||
settings_ids,
|
||||
other: operation_ids,
|
||||
other,
|
||||
allow_index_creation,
|
||||
})
|
||||
}
|
||||
|
||||
(
|
||||
BatchKind::SettingsAndDocumentOperation { settings_ids, method: ReplaceDocuments, mut operation_ids, allow_index_creation, primary_key: _},
|
||||
BatchKind::SettingsAndDocumentImport { settings_ids, method: ReplaceDocuments, mut import_ids, allow_index_creation, primary_key: _},
|
||||
K::DocumentImport { method: ReplaceDocuments, primary_key: pk2, .. },
|
||||
) => {
|
||||
operation_ids.push(id);
|
||||
Continue(BatchKind::SettingsAndDocumentOperation {
|
||||
import_ids.push(id);
|
||||
Continue(BatchKind::SettingsAndDocumentImport {
|
||||
settings_ids,
|
||||
method: ReplaceDocuments,
|
||||
allow_index_creation,
|
||||
primary_key: pk2,
|
||||
operation_ids,
|
||||
import_ids,
|
||||
})
|
||||
}
|
||||
(
|
||||
BatchKind::SettingsAndDocumentOperation { settings_ids, method: UpdateDocuments, allow_index_creation, primary_key: _, mut operation_ids },
|
||||
BatchKind::SettingsAndDocumentImport { settings_ids, method: UpdateDocuments, allow_index_creation, primary_key: _, mut import_ids },
|
||||
K::DocumentImport { method: UpdateDocuments, primary_key: pk2, .. },
|
||||
) => {
|
||||
operation_ids.push(id);
|
||||
Continue(BatchKind::SettingsAndDocumentOperation {
|
||||
import_ids.push(id);
|
||||
Continue(BatchKind::SettingsAndDocumentImport {
|
||||
settings_ids,
|
||||
method: UpdateDocuments,
|
||||
allow_index_creation,
|
||||
primary_key: pk2,
|
||||
operation_ids,
|
||||
import_ids,
|
||||
})
|
||||
}
|
||||
// But we can't batch a settings and a doc op with another doc op
|
||||
// this MUST be AFTER the two previous branch
|
||||
(
|
||||
this @ BatchKind::SettingsAndDocumentOperation { .. },
|
||||
this @ BatchKind::SettingsAndDocumentImport { .. },
|
||||
K::DocumentDeletion | K::DocumentImport { .. },
|
||||
) => Break(this),
|
||||
(
|
||||
BatchKind::SettingsAndDocumentOperation { mut settings_ids, method, allow_index_creation,primary_key, operation_ids },
|
||||
BatchKind::SettingsAndDocumentImport { mut settings_ids, method, allow_index_creation,primary_key, import_ids },
|
||||
K::Settings { .. },
|
||||
) => {
|
||||
settings_ids.push(id);
|
||||
Continue(BatchKind::SettingsAndDocumentOperation {
|
||||
Continue(BatchKind::SettingsAndDocumentImport {
|
||||
settings_ids,
|
||||
method,
|
||||
allow_index_creation,
|
||||
primary_key,
|
||||
operation_ids,
|
||||
import_ids,
|
||||
})
|
||||
}
|
||||
(
|
||||
@@ -596,29 +588,29 @@ mod tests {
    fn autobatch_simple_operation_together() {
        // we can autobatch one or multiple `ReplaceDocuments` together.
        // if the index exists.
        debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, true, None)]), @"Some((DocumentOperation { method: ReplaceDocuments, allow_index_creation: true, primary_key: None, operation_ids: [0] }, true))");
        debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, false, None)]), @"Some((DocumentOperation { method: ReplaceDocuments, allow_index_creation: false, primary_key: None, operation_ids: [0] }, false))");
        debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, true, None), doc_imp( ReplaceDocuments, true , None), doc_imp(ReplaceDocuments, true , None)]), @"Some((DocumentOperation { method: ReplaceDocuments, allow_index_creation: true, primary_key: None, operation_ids: [0, 1, 2] }, true))");
        debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, false, None), doc_imp( ReplaceDocuments, false , None), doc_imp(ReplaceDocuments, false , None)]), @"Some((DocumentOperation { method: ReplaceDocuments, allow_index_creation: false, primary_key: None, operation_ids: [0, 1, 2] }, false))");
        debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, true, None)]), @"Some((DocumentImport { method: ReplaceDocuments, allow_index_creation: true, primary_key: None, import_ids: [0] }, true))");
        debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, false, None)]), @"Some((DocumentImport { method: ReplaceDocuments, allow_index_creation: false, primary_key: None, import_ids: [0] }, false))");
        debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, true, None), doc_imp( ReplaceDocuments, true , None), doc_imp(ReplaceDocuments, true , None)]), @"Some((DocumentImport { method: ReplaceDocuments, allow_index_creation: true, primary_key: None, import_ids: [0, 1, 2] }, true))");
        debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, false, None), doc_imp( ReplaceDocuments, false , None), doc_imp(ReplaceDocuments, false , None)]), @"Some((DocumentImport { method: ReplaceDocuments, allow_index_creation: false, primary_key: None, import_ids: [0, 1, 2] }, false))");

        // if it doesn't exist.
        debug_snapshot!(autobatch_from(false,None, [doc_imp(ReplaceDocuments, true, None)]), @"Some((DocumentOperation { method: ReplaceDocuments, allow_index_creation: true, primary_key: None, operation_ids: [0] }, true))");
        debug_snapshot!(autobatch_from(false,None, [doc_imp(ReplaceDocuments, false, None)]), @"Some((DocumentOperation { method: ReplaceDocuments, allow_index_creation: false, primary_key: None, operation_ids: [0] }, false))");
        debug_snapshot!(autobatch_from(false,None, [doc_imp(ReplaceDocuments, true, None), doc_imp( ReplaceDocuments, true , None), doc_imp(ReplaceDocuments, true , None)]), @"Some((DocumentOperation { method: ReplaceDocuments, allow_index_creation: true, primary_key: None, operation_ids: [0, 1, 2] }, true))");
        debug_snapshot!(autobatch_from(false,None, [doc_imp(ReplaceDocuments, false, None), doc_imp( ReplaceDocuments, true , None), doc_imp(ReplaceDocuments, true , None)]), @"Some((DocumentOperation { method: ReplaceDocuments, allow_index_creation: false, primary_key: None, operation_ids: [0] }, false))");
        debug_snapshot!(autobatch_from(false,None, [doc_imp(ReplaceDocuments, true, None)]), @"Some((DocumentImport { method: ReplaceDocuments, allow_index_creation: true, primary_key: None, import_ids: [0] }, true))");
        debug_snapshot!(autobatch_from(false,None, [doc_imp(ReplaceDocuments, false, None)]), @"Some((DocumentImport { method: ReplaceDocuments, allow_index_creation: false, primary_key: None, import_ids: [0] }, false))");
        debug_snapshot!(autobatch_from(false,None, [doc_imp(ReplaceDocuments, true, None), doc_imp( ReplaceDocuments, true , None), doc_imp(ReplaceDocuments, true , None)]), @"Some((DocumentImport { method: ReplaceDocuments, allow_index_creation: true, primary_key: None, import_ids: [0, 1, 2] }, true))");
        debug_snapshot!(autobatch_from(false,None, [doc_imp(ReplaceDocuments, false, None), doc_imp( ReplaceDocuments, true , None), doc_imp(ReplaceDocuments, true , None)]), @"Some((DocumentImport { method: ReplaceDocuments, allow_index_creation: false, primary_key: None, import_ids: [0] }, false))");

        // we can autobatch one or multiple `UpdateDocuments` together.
        // if the index exists.
        debug_snapshot!(autobatch_from(true, None, [doc_imp(UpdateDocuments, true, None)]), @"Some((DocumentOperation { method: UpdateDocuments, allow_index_creation: true, primary_key: None, operation_ids: [0] }, true))");
        debug_snapshot!(autobatch_from(true, None, [doc_imp(UpdateDocuments, true, None), doc_imp(UpdateDocuments, true, None), doc_imp(UpdateDocuments, true, None)]), @"Some((DocumentOperation { method: UpdateDocuments, allow_index_creation: true, primary_key: None, operation_ids: [0, 1, 2] }, true))");
        debug_snapshot!(autobatch_from(true, None, [doc_imp(UpdateDocuments, false, None)]), @"Some((DocumentOperation { method: UpdateDocuments, allow_index_creation: false, primary_key: None, operation_ids: [0] }, false))");
        debug_snapshot!(autobatch_from(true, None, [doc_imp(UpdateDocuments, false, None), doc_imp(UpdateDocuments, false, None), doc_imp(UpdateDocuments, false, None)]), @"Some((DocumentOperation { method: UpdateDocuments, allow_index_creation: false, primary_key: None, operation_ids: [0, 1, 2] }, false))");
        debug_snapshot!(autobatch_from(true, None, [doc_imp(UpdateDocuments, true, None)]), @"Some((DocumentImport { method: UpdateDocuments, allow_index_creation: true, primary_key: None, import_ids: [0] }, true))");
        debug_snapshot!(autobatch_from(true, None, [doc_imp(UpdateDocuments, true, None), doc_imp(UpdateDocuments, true, None), doc_imp(UpdateDocuments, true, None)]), @"Some((DocumentImport { method: UpdateDocuments, allow_index_creation: true, primary_key: None, import_ids: [0, 1, 2] }, true))");
        debug_snapshot!(autobatch_from(true, None, [doc_imp(UpdateDocuments, false, None)]), @"Some((DocumentImport { method: UpdateDocuments, allow_index_creation: false, primary_key: None, import_ids: [0] }, false))");
        debug_snapshot!(autobatch_from(true, None, [doc_imp(UpdateDocuments, false, None), doc_imp(UpdateDocuments, false, None), doc_imp(UpdateDocuments, false, None)]), @"Some((DocumentImport { method: UpdateDocuments, allow_index_creation: false, primary_key: None, import_ids: [0, 1, 2] }, false))");

        // if it doesn't exist.
        debug_snapshot!(autobatch_from(false,None, [doc_imp(UpdateDocuments, true, None)]), @"Some((DocumentOperation { method: UpdateDocuments, allow_index_creation: true, primary_key: None, operation_ids: [0] }, true))");
        debug_snapshot!(autobatch_from(false,None, [doc_imp(UpdateDocuments, true, None), doc_imp(UpdateDocuments, true, None), doc_imp(UpdateDocuments, true, None)]), @"Some((DocumentOperation { method: UpdateDocuments, allow_index_creation: true, primary_key: None, operation_ids: [0, 1, 2] }, true))");
        debug_snapshot!(autobatch_from(false,None, [doc_imp(UpdateDocuments, false, None)]), @"Some((DocumentOperation { method: UpdateDocuments, allow_index_creation: false, primary_key: None, operation_ids: [0] }, false))");
        debug_snapshot!(autobatch_from(false,None, [doc_imp(UpdateDocuments, false, None), doc_imp(UpdateDocuments, false, None), doc_imp(UpdateDocuments, false, None)]), @"Some((DocumentOperation { method: UpdateDocuments, allow_index_creation: false, primary_key: None, operation_ids: [0, 1, 2] }, false))");
        debug_snapshot!(autobatch_from(false,None, [doc_imp(UpdateDocuments, true, None)]), @"Some((DocumentImport { method: UpdateDocuments, allow_index_creation: true, primary_key: None, import_ids: [0] }, true))");
        debug_snapshot!(autobatch_from(false,None, [doc_imp(UpdateDocuments, true, None), doc_imp(UpdateDocuments, true, None), doc_imp(UpdateDocuments, true, None)]), @"Some((DocumentImport { method: UpdateDocuments, allow_index_creation: true, primary_key: None, import_ids: [0, 1, 2] }, true))");
        debug_snapshot!(autobatch_from(false,None, [doc_imp(UpdateDocuments, false, None)]), @"Some((DocumentImport { method: UpdateDocuments, allow_index_creation: false, primary_key: None, import_ids: [0] }, false))");
        debug_snapshot!(autobatch_from(false,None, [doc_imp(UpdateDocuments, false, None), doc_imp(UpdateDocuments, false, None), doc_imp(UpdateDocuments, false, None)]), @"Some((DocumentImport { method: UpdateDocuments, allow_index_creation: false, primary_key: None, import_ids: [0, 1, 2] }, false))");

        // we can autobatch one or multiple DocumentDeletion together
        debug_snapshot!(autobatch_from(true, None, [doc_del()]), @"Some((DocumentDeletion { deletion_ids: [0] }, false))");
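These assertions use inline snapshots: the @"..." literal next to each call is the expected Debug rendering of the autobatcher's output, stored in the source itself. The debug_snapshot! macro here is presumably a thin wrapper over insta-style inline snapshot assertions; treating that as an assumption, a minimal sketch with the insta crate used directly (insta as a dev-dependency):

    #[cfg(test)]
    mod snapshot_example {
        #[test]
        fn inline_snapshot() {
            // The expected value lives inline after `@`; when the real
            // output drifts, `cargo insta review` can rewrite it in place.
            insta::assert_debug_snapshot!(Some((vec![0, 1, 2], true)), @"Some(([0, 1, 2], true))");
        }
    }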
@@ -636,83 +628,56 @@ mod tests {
        debug_snapshot!(autobatch_from(false,None, [settings(true), settings(true), settings(true)]), @"Some((Settings { allow_index_creation: true, settings_ids: [0, 1, 2] }, true))");
        debug_snapshot!(autobatch_from(false,None, [settings(false)]), @"Some((Settings { allow_index_creation: false, settings_ids: [0] }, false))");
        debug_snapshot!(autobatch_from(false,None, [settings(false), settings(false), settings(false)]), @"Some((Settings { allow_index_creation: false, settings_ids: [0, 1, 2] }, false))");

        // We can't autobatch document addition with document deletion
        debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, true, None), doc_del()]), @"Some((DocumentOperation { method: ReplaceDocuments, allow_index_creation: true, primary_key: None, operation_ids: [0] }, true))");
        debug_snapshot!(autobatch_from(true, None, [doc_imp(UpdateDocuments, true, None), doc_del()]), @"Some((DocumentOperation { method: UpdateDocuments, allow_index_creation: true, primary_key: None, operation_ids: [0] }, true))");
        debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, false, None), doc_del()]), @"Some((DocumentOperation { method: ReplaceDocuments, allow_index_creation: false, primary_key: None, operation_ids: [0] }, false))");
        debug_snapshot!(autobatch_from(true, None, [doc_imp(UpdateDocuments, false, None), doc_del()]), @"Some((DocumentOperation { method: UpdateDocuments, allow_index_creation: false, primary_key: None, operation_ids: [0] }, false))");
        debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, true, Some("catto")), doc_del()]), @r###"Some((DocumentOperation { method: ReplaceDocuments, allow_index_creation: true, primary_key: Some("catto"), operation_ids: [0] }, true))"###);
        debug_snapshot!(autobatch_from(true, None, [doc_imp(UpdateDocuments, true, Some("catto")), doc_del()]), @r###"Some((DocumentOperation { method: UpdateDocuments, allow_index_creation: true, primary_key: Some("catto"), operation_ids: [0] }, true))"###);
        debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, false, Some("catto")), doc_del()]), @r###"Some((DocumentOperation { method: ReplaceDocuments, allow_index_creation: false, primary_key: Some("catto"), operation_ids: [0] }, false))"###);
        debug_snapshot!(autobatch_from(true, None, [doc_imp(UpdateDocuments, false, Some("catto")), doc_del()]), @r###"Some((DocumentOperation { method: UpdateDocuments, allow_index_creation: false, primary_key: Some("catto"), operation_ids: [0] }, false))"###);
        debug_snapshot!(autobatch_from(false, None, [doc_imp(ReplaceDocuments, true, None), doc_del()]), @"Some((DocumentOperation { method: ReplaceDocuments, allow_index_creation: true, primary_key: None, operation_ids: [0] }, true))");
        debug_snapshot!(autobatch_from(false, None, [doc_imp(UpdateDocuments, true, None), doc_del()]), @"Some((DocumentOperation { method: UpdateDocuments, allow_index_creation: true, primary_key: None, operation_ids: [0] }, true))");
        debug_snapshot!(autobatch_from(false, None, [doc_imp(ReplaceDocuments, false, None), doc_del()]), @"Some((DocumentOperation { method: ReplaceDocuments, allow_index_creation: false, primary_key: None, operation_ids: [0] }, false))");
        debug_snapshot!(autobatch_from(false, None, [doc_imp(UpdateDocuments, false, None), doc_del()]), @"Some((DocumentOperation { method: UpdateDocuments, allow_index_creation: false, primary_key: None, operation_ids: [0] }, false))");
        debug_snapshot!(autobatch_from(false, None, [doc_imp(ReplaceDocuments, true, Some("catto")), doc_del()]), @r###"Some((DocumentOperation { method: ReplaceDocuments, allow_index_creation: true, primary_key: Some("catto"), operation_ids: [0] }, true))"###);
        debug_snapshot!(autobatch_from(false, None, [doc_imp(UpdateDocuments, true, Some("catto")), doc_del()]), @r###"Some((DocumentOperation { method: UpdateDocuments, allow_index_creation: true, primary_key: Some("catto"), operation_ids: [0] }, true))"###);
        debug_snapshot!(autobatch_from(false, None, [doc_imp(ReplaceDocuments, false, Some("catto")), doc_del()]), @r###"Some((DocumentOperation { method: ReplaceDocuments, allow_index_creation: false, primary_key: Some("catto"), operation_ids: [0] }, false))"###);
        debug_snapshot!(autobatch_from(false, None, [doc_imp(UpdateDocuments, false, Some("catto")), doc_del()]), @r###"Some((DocumentOperation { method: UpdateDocuments, allow_index_creation: false, primary_key: Some("catto"), operation_ids: [0] }, false))"###);
        // we also can't do it the other way around
        debug_snapshot!(autobatch_from(true, None, [doc_del(), doc_imp(ReplaceDocuments, true, None)]), @"Some((DocumentDeletion { deletion_ids: [0] }, false))");
        debug_snapshot!(autobatch_from(true, None, [doc_del(), doc_imp(UpdateDocuments, true, None)]), @"Some((DocumentDeletion { deletion_ids: [0] }, false))");
        debug_snapshot!(autobatch_from(true, None, [doc_del(), doc_imp(ReplaceDocuments, false, None)]), @"Some((DocumentDeletion { deletion_ids: [0] }, false))");
        debug_snapshot!(autobatch_from(true, None, [doc_del(), doc_imp(UpdateDocuments, false, None)]), @"Some((DocumentDeletion { deletion_ids: [0] }, false))");
        debug_snapshot!(autobatch_from(true, None, [doc_del(), doc_imp(ReplaceDocuments, true, Some("catto"))]), @"Some((DocumentDeletion { deletion_ids: [0] }, false))");
        debug_snapshot!(autobatch_from(true, None, [doc_del(), doc_imp(UpdateDocuments, true, Some("catto"))]), @"Some((DocumentDeletion { deletion_ids: [0] }, false))");
        debug_snapshot!(autobatch_from(true, None, [doc_del(), doc_imp(ReplaceDocuments, false, Some("catto"))]), @"Some((DocumentDeletion { deletion_ids: [0] }, false))");
        debug_snapshot!(autobatch_from(true, None, [doc_del(), doc_imp(UpdateDocuments, false, Some("catto"))]), @"Some((DocumentDeletion { deletion_ids: [0] }, false))");
        debug_snapshot!(autobatch_from(false, None, [doc_del(), doc_imp(ReplaceDocuments, false, None)]), @"Some((DocumentDeletion { deletion_ids: [0] }, false))");
        debug_snapshot!(autobatch_from(false, None, [doc_del(), doc_imp(UpdateDocuments, false, None)]), @"Some((DocumentDeletion { deletion_ids: [0] }, false))");
        debug_snapshot!(autobatch_from(false, None, [doc_del(), doc_imp(ReplaceDocuments, false, Some("catto"))]), @"Some((DocumentDeletion { deletion_ids: [0] }, false))");
        debug_snapshot!(autobatch_from(false, None, [doc_del(), doc_imp(UpdateDocuments, false, Some("catto"))]), @"Some((DocumentDeletion { deletion_ids: [0] }, false))");
    }

    #[test]
    fn simple_document_operation_dont_autobatch_with_other() {
        // addition, updates and deletion can't batch together
        debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, true, None), doc_imp(UpdateDocuments, true, None)]), @"Some((DocumentOperation { method: ReplaceDocuments, allow_index_creation: true, primary_key: None, operation_ids: [0] }, true))");
        debug_snapshot!(autobatch_from(true, None, [doc_imp(UpdateDocuments, true, None), doc_imp(ReplaceDocuments, true, None)]), @"Some((DocumentOperation { method: UpdateDocuments, allow_index_creation: true, primary_key: None, operation_ids: [0] }, true))");
        debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, true, None), doc_imp(UpdateDocuments, true, None)]), @"Some((DocumentImport { method: ReplaceDocuments, allow_index_creation: true, primary_key: None, import_ids: [0] }, true))");
        debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, true, None), doc_del()]), @"Some((DocumentImport { method: ReplaceDocuments, allow_index_creation: true, primary_key: None, import_ids: [0] }, true))");
        debug_snapshot!(autobatch_from(true, None, [doc_imp(UpdateDocuments, true, None), doc_imp(ReplaceDocuments, true, None)]), @"Some((DocumentImport { method: UpdateDocuments, allow_index_creation: true, primary_key: None, import_ids: [0] }, true))");
        debug_snapshot!(autobatch_from(true, None, [doc_imp(UpdateDocuments, true, None), doc_del()]), @"Some((DocumentImport { method: UpdateDocuments, allow_index_creation: true, primary_key: None, import_ids: [0] }, true))");
        debug_snapshot!(autobatch_from(true, None, [doc_del(), doc_imp(ReplaceDocuments, true, None)]), @"Some((DocumentDeletion { deletion_ids: [0] }, false))");
        debug_snapshot!(autobatch_from(true, None, [doc_del(), doc_imp(UpdateDocuments, true, None)]), @"Some((DocumentDeletion { deletion_ids: [0] }, false))");

        debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, true, None), idx_create()]), @"Some((DocumentOperation { method: ReplaceDocuments, allow_index_creation: true, primary_key: None, operation_ids: [0] }, true))");
        debug_snapshot!(autobatch_from(true, None, [doc_imp(UpdateDocuments, true, None), idx_create()]), @"Some((DocumentOperation { method: UpdateDocuments, allow_index_creation: true, primary_key: None, operation_ids: [0] }, true))");
        debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, true, None), idx_create()]), @"Some((DocumentImport { method: ReplaceDocuments, allow_index_creation: true, primary_key: None, import_ids: [0] }, true))");
        debug_snapshot!(autobatch_from(true, None, [doc_imp(UpdateDocuments, true, None), idx_create()]), @"Some((DocumentImport { method: UpdateDocuments, allow_index_creation: true, primary_key: None, import_ids: [0] }, true))");
        debug_snapshot!(autobatch_from(true, None, [doc_del(), idx_create()]), @"Some((DocumentDeletion { deletion_ids: [0] }, false))");

        debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, true, None), idx_update()]), @"Some((DocumentOperation { method: ReplaceDocuments, allow_index_creation: true, primary_key: None, operation_ids: [0] }, true))");
        debug_snapshot!(autobatch_from(true, None, [doc_imp(UpdateDocuments, true, None), idx_update()]), @"Some((DocumentOperation { method: UpdateDocuments, allow_index_creation: true, primary_key: None, operation_ids: [0] }, true))");
        debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, true, None), idx_update()]), @"Some((DocumentImport { method: ReplaceDocuments, allow_index_creation: true, primary_key: None, import_ids: [0] }, true))");
        debug_snapshot!(autobatch_from(true, None, [doc_imp(UpdateDocuments, true, None), idx_update()]), @"Some((DocumentImport { method: UpdateDocuments, allow_index_creation: true, primary_key: None, import_ids: [0] }, true))");
        debug_snapshot!(autobatch_from(true, None, [doc_del(), idx_update()]), @"Some((DocumentDeletion { deletion_ids: [0] }, false))");

        debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, true, None), idx_swap()]), @"Some((DocumentOperation { method: ReplaceDocuments, allow_index_creation: true, primary_key: None, operation_ids: [0] }, true))");
        debug_snapshot!(autobatch_from(true, None, [doc_imp(UpdateDocuments, true, None), idx_swap()]), @"Some((DocumentOperation { method: UpdateDocuments, allow_index_creation: true, primary_key: None, operation_ids: [0] }, true))");
        debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, true, None), idx_swap()]), @"Some((DocumentImport { method: ReplaceDocuments, allow_index_creation: true, primary_key: None, import_ids: [0] }, true))");
        debug_snapshot!(autobatch_from(true, None, [doc_imp(UpdateDocuments, true, None), idx_swap()]), @"Some((DocumentImport { method: UpdateDocuments, allow_index_creation: true, primary_key: None, import_ids: [0] }, true))");
        debug_snapshot!(autobatch_from(true, None, [doc_del(), idx_swap()]), @"Some((DocumentDeletion { deletion_ids: [0] }, false))");
    }

    #[test]
    fn document_addition_batch_with_settings() {
        // simple case
        debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, true, None), settings(true)]), @"Some((SettingsAndDocumentOperation { settings_ids: [1], method: ReplaceDocuments, allow_index_creation: true, primary_key: None, operation_ids: [0] }, true))");
        debug_snapshot!(autobatch_from(true, None, [doc_imp(UpdateDocuments, true, None), settings(true)]), @"Some((SettingsAndDocumentOperation { settings_ids: [1], method: UpdateDocuments, allow_index_creation: true, primary_key: None, operation_ids: [0] }, true))");
        debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, true, None), settings(true)]), @"Some((SettingsAndDocumentImport { settings_ids: [1], method: ReplaceDocuments, allow_index_creation: true, primary_key: None, import_ids: [0] }, true))");
        debug_snapshot!(autobatch_from(true, None, [doc_imp(UpdateDocuments, true, None), settings(true)]), @"Some((SettingsAndDocumentImport { settings_ids: [1], method: UpdateDocuments, allow_index_creation: true, primary_key: None, import_ids: [0] }, true))");

        // multiple settings and doc addition
        debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, true, None), doc_imp(ReplaceDocuments, true, None), settings(true), settings(true)]), @"Some((SettingsAndDocumentOperation { settings_ids: [2, 3], method: ReplaceDocuments, allow_index_creation: true, primary_key: None, operation_ids: [0, 1] }, true))");
        debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, true, None), doc_imp(ReplaceDocuments, true, None), settings(true), settings(true)]), @"Some((SettingsAndDocumentOperation { settings_ids: [2, 3], method: ReplaceDocuments, allow_index_creation: true, primary_key: None, operation_ids: [0, 1] }, true))");
        debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, true, None), doc_imp(ReplaceDocuments, true, None), settings(true), settings(true)]), @"Some((SettingsAndDocumentImport { settings_ids: [2, 3], method: ReplaceDocuments, allow_index_creation: true, primary_key: None, import_ids: [0, 1] }, true))");
        debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, true, None), doc_imp(ReplaceDocuments, true, None), settings(true), settings(true)]), @"Some((SettingsAndDocumentImport { settings_ids: [2, 3], method: ReplaceDocuments, allow_index_creation: true, primary_key: None, import_ids: [0, 1] }, true))");

        // addition and setting unordered
        debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, true, None), settings(true), doc_imp(ReplaceDocuments, true, None), settings(true)]), @"Some((SettingsAndDocumentOperation { settings_ids: [1, 3], method: ReplaceDocuments, allow_index_creation: true, primary_key: None, operation_ids: [0, 2] }, true))");
        debug_snapshot!(autobatch_from(true, None, [doc_imp(UpdateDocuments, true, None), settings(true), doc_imp(UpdateDocuments, true, None), settings(true)]), @"Some((SettingsAndDocumentOperation { settings_ids: [1, 3], method: UpdateDocuments, allow_index_creation: true, primary_key: None, operation_ids: [0, 2] }, true))");
        debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, true, None), settings(true), doc_imp(ReplaceDocuments, true, None), settings(true)]), @"Some((SettingsAndDocumentImport { settings_ids: [1, 3], method: ReplaceDocuments, allow_index_creation: true, primary_key: None, import_ids: [0, 2] }, true))");
        debug_snapshot!(autobatch_from(true, None, [doc_imp(UpdateDocuments, true, None), settings(true), doc_imp(UpdateDocuments, true, None), settings(true)]), @"Some((SettingsAndDocumentImport { settings_ids: [1, 3], method: UpdateDocuments, allow_index_creation: true, primary_key: None, import_ids: [0, 2] }, true))");

        // We ensure this kind of batch doesn't batch with forbidden operations
        debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, true, None), settings(true), doc_imp(UpdateDocuments, true, None)]), @"Some((SettingsAndDocumentOperation { settings_ids: [1], method: ReplaceDocuments, allow_index_creation: true, primary_key: None, operation_ids: [0] }, true))");
        debug_snapshot!(autobatch_from(true, None, [doc_imp(UpdateDocuments, true, None), settings(true), doc_imp(ReplaceDocuments, true, None)]), @"Some((SettingsAndDocumentOperation { settings_ids: [1], method: UpdateDocuments, allow_index_creation: true, primary_key: None, operation_ids: [0] }, true))");
        debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, true, None), settings(true), doc_del()]), @"Some((SettingsAndDocumentOperation { settings_ids: [1], method: ReplaceDocuments, allow_index_creation: true, primary_key: None, operation_ids: [0] }, true))");
        debug_snapshot!(autobatch_from(true, None, [doc_imp(UpdateDocuments, true, None), settings(true), doc_del()]), @"Some((SettingsAndDocumentOperation { settings_ids: [1], method: UpdateDocuments, allow_index_creation: true, primary_key: None, operation_ids: [0] }, true))");
        debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, true, None), settings(true), idx_create()]), @"Some((SettingsAndDocumentOperation { settings_ids: [1], method: ReplaceDocuments, allow_index_creation: true, primary_key: None, operation_ids: [0] }, true))");
        debug_snapshot!(autobatch_from(true, None, [doc_imp(UpdateDocuments, true, None), settings(true), idx_create()]), @"Some((SettingsAndDocumentOperation { settings_ids: [1], method: UpdateDocuments, allow_index_creation: true, primary_key: None, operation_ids: [0] }, true))");
        debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, true, None), settings(true), idx_update()]), @"Some((SettingsAndDocumentOperation { settings_ids: [1], method: ReplaceDocuments, allow_index_creation: true, primary_key: None, operation_ids: [0] }, true))");
        debug_snapshot!(autobatch_from(true, None, [doc_imp(UpdateDocuments, true, None), settings(true), idx_update()]), @"Some((SettingsAndDocumentOperation { settings_ids: [1], method: UpdateDocuments, allow_index_creation: true, primary_key: None, operation_ids: [0] }, true))");
        debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, true, None), settings(true), idx_swap()]), @"Some((SettingsAndDocumentOperation { settings_ids: [1], method: ReplaceDocuments, allow_index_creation: true, primary_key: None, operation_ids: [0] }, true))");
        debug_snapshot!(autobatch_from(true, None, [doc_imp(UpdateDocuments, true, None), settings(true), idx_swap()]), @"Some((SettingsAndDocumentOperation { settings_ids: [1], method: UpdateDocuments, allow_index_creation: true, primary_key: None, operation_ids: [0] }, true))");
        debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, true, None), settings(true), doc_imp(UpdateDocuments, true, None)]), @"Some((SettingsAndDocumentImport { settings_ids: [1], method: ReplaceDocuments, allow_index_creation: true, primary_key: None, import_ids: [0] }, true))");
        debug_snapshot!(autobatch_from(true, None, [doc_imp(UpdateDocuments, true, None), settings(true), doc_imp(ReplaceDocuments, true, None)]), @"Some((SettingsAndDocumentImport { settings_ids: [1], method: UpdateDocuments, allow_index_creation: true, primary_key: None, import_ids: [0] }, true))");
        debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, true, None), settings(true), doc_del()]), @"Some((SettingsAndDocumentImport { settings_ids: [1], method: ReplaceDocuments, allow_index_creation: true, primary_key: None, import_ids: [0] }, true))");
        debug_snapshot!(autobatch_from(true, None, [doc_imp(UpdateDocuments, true, None), settings(true), doc_del()]), @"Some((SettingsAndDocumentImport { settings_ids: [1], method: UpdateDocuments, allow_index_creation: true, primary_key: None, import_ids: [0] }, true))");
        debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, true, None), settings(true), idx_create()]), @"Some((SettingsAndDocumentImport { settings_ids: [1], method: ReplaceDocuments, allow_index_creation: true, primary_key: None, import_ids: [0] }, true))");
        debug_snapshot!(autobatch_from(true, None, [doc_imp(UpdateDocuments, true, None), settings(true), idx_create()]), @"Some((SettingsAndDocumentImport { settings_ids: [1], method: UpdateDocuments, allow_index_creation: true, primary_key: None, import_ids: [0] }, true))");
        debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, true, None), settings(true), idx_update()]), @"Some((SettingsAndDocumentImport { settings_ids: [1], method: ReplaceDocuments, allow_index_creation: true, primary_key: None, import_ids: [0] }, true))");
        debug_snapshot!(autobatch_from(true, None, [doc_imp(UpdateDocuments, true, None), settings(true), idx_update()]), @"Some((SettingsAndDocumentImport { settings_ids: [1], method: UpdateDocuments, allow_index_creation: true, primary_key: None, import_ids: [0] }, true))");
        debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, true, None), settings(true), idx_swap()]), @"Some((SettingsAndDocumentImport { settings_ids: [1], method: ReplaceDocuments, allow_index_creation: true, primary_key: None, import_ids: [0] }, true))");
        debug_snapshot!(autobatch_from(true, None, [doc_imp(UpdateDocuments, true, None), settings(true), idx_swap()]), @"Some((SettingsAndDocumentImport { settings_ids: [1], method: UpdateDocuments, allow_index_creation: true, primary_key: None, import_ids: [0] }, true))");
    }

    #[test]
@@ -824,73 +789,67 @@ mod tests {
        debug_snapshot!(autobatch_from(false,None, [doc_imp(UpdateDocuments, false, None), settings(false), doc_clr(), idx_del()]), @"Some((IndexDeletion { ids: [1, 3, 0, 2] }, false))");
        // The third and final case is when the first task doesn't create an index but is directly followed by a task creating an index. In this case we can't batch with what
        // follows because we first need to process the erroneous batch.
        debug_snapshot!(autobatch_from(false,None, [doc_imp(ReplaceDocuments,false, None), settings(true), idx_del()]), @"Some((DocumentOperation { method: ReplaceDocuments, allow_index_creation: false, primary_key: None, operation_ids: [0] }, false))");
        debug_snapshot!(autobatch_from(false,None, [doc_imp(UpdateDocuments, false, None), settings(true), idx_del()]), @"Some((DocumentOperation { method: UpdateDocuments, allow_index_creation: false, primary_key: None, operation_ids: [0] }, false))");
        debug_snapshot!(autobatch_from(false,None, [doc_imp(ReplaceDocuments,false, None), settings(true), doc_clr(), idx_del()]), @"Some((DocumentOperation { method: ReplaceDocuments, allow_index_creation: false, primary_key: None, operation_ids: [0] }, false))");
        debug_snapshot!(autobatch_from(false,None, [doc_imp(UpdateDocuments, false, None), settings(true), doc_clr(), idx_del()]), @"Some((DocumentOperation { method: UpdateDocuments, allow_index_creation: false, primary_key: None, operation_ids: [0] }, false))");
        debug_snapshot!(autobatch_from(false,None, [doc_imp(ReplaceDocuments,false, None), settings(true), idx_del()]), @"Some((DocumentImport { method: ReplaceDocuments, allow_index_creation: false, primary_key: None, import_ids: [0] }, false))");
        debug_snapshot!(autobatch_from(false,None, [doc_imp(UpdateDocuments, false, None), settings(true), idx_del()]), @"Some((DocumentImport { method: UpdateDocuments, allow_index_creation: false, primary_key: None, import_ids: [0] }, false))");
        debug_snapshot!(autobatch_from(false,None, [doc_imp(ReplaceDocuments,false, None), settings(true), doc_clr(), idx_del()]), @"Some((DocumentImport { method: ReplaceDocuments, allow_index_creation: false, primary_key: None, import_ids: [0] }, false))");
        debug_snapshot!(autobatch_from(false,None, [doc_imp(UpdateDocuments, false, None), settings(true), doc_clr(), idx_del()]), @"Some((DocumentImport { method: UpdateDocuments, allow_index_creation: false, primary_key: None, import_ids: [0] }, false))");
    }

    #[test]
    fn allowed_and_disallowed_index_creation() {
        // `DocumentImport` can't be mixed with those disallowed to do so except if the index already exists.
        debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, false, None), doc_imp(ReplaceDocuments, true, None)]), @"Some((DocumentOperation { method: ReplaceDocuments, allow_index_creation: false, primary_key: None, operation_ids: [0, 1] }, false))");
        debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, true, None), doc_imp(ReplaceDocuments, true, None)]), @"Some((DocumentOperation { method: ReplaceDocuments, allow_index_creation: true, primary_key: None, operation_ids: [0, 1] }, true))");
        debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, false, None), doc_imp(ReplaceDocuments, false, None)]), @"Some((DocumentOperation { method: ReplaceDocuments, allow_index_creation: false, primary_key: None, operation_ids: [0, 1] }, false))");
        debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, true, None), settings(true)]), @"Some((SettingsAndDocumentOperation { settings_ids: [1], method: ReplaceDocuments, allow_index_creation: true, primary_key: None, operation_ids: [0] }, true))");
        debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, false, None), settings(true)]), @"Some((SettingsAndDocumentOperation { settings_ids: [1], method: ReplaceDocuments, allow_index_creation: false, primary_key: None, operation_ids: [0] }, false))");
        debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, false, None), doc_imp(ReplaceDocuments, true, None)]), @"Some((DocumentImport { method: ReplaceDocuments, allow_index_creation: false, primary_key: None, import_ids: [0, 1] }, false))");
        debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, true, None), doc_imp(ReplaceDocuments, true, None)]), @"Some((DocumentImport { method: ReplaceDocuments, allow_index_creation: true, primary_key: None, import_ids: [0, 1] }, true))");
        debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, false, None), doc_imp(ReplaceDocuments, false, None)]), @"Some((DocumentImport { method: ReplaceDocuments, allow_index_creation: false, primary_key: None, import_ids: [0, 1] }, false))");
        debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, true, None), settings(true)]), @"Some((SettingsAndDocumentImport { settings_ids: [1], method: ReplaceDocuments, allow_index_creation: true, primary_key: None, import_ids: [0] }, true))");
        debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, false, None), settings(true)]), @"Some((SettingsAndDocumentImport { settings_ids: [1], method: ReplaceDocuments, allow_index_creation: false, primary_key: None, import_ids: [0] }, false))");

        debug_snapshot!(autobatch_from(false,None, [doc_imp(ReplaceDocuments, false, None), doc_imp(ReplaceDocuments, true, None)]), @"Some((DocumentOperation { method: ReplaceDocuments, allow_index_creation: false, primary_key: None, operation_ids: [0] }, false))");
        debug_snapshot!(autobatch_from(false,None, [doc_imp(ReplaceDocuments, true, None), doc_imp(ReplaceDocuments, true, None)]), @"Some((DocumentOperation { method: ReplaceDocuments, allow_index_creation: true, primary_key: None, operation_ids: [0, 1] }, true))");
        debug_snapshot!(autobatch_from(false,None, [doc_imp(ReplaceDocuments, false, None), doc_imp(ReplaceDocuments, false, None)]), @"Some((DocumentOperation { method: ReplaceDocuments, allow_index_creation: false, primary_key: None, operation_ids: [0, 1] }, false))");
        debug_snapshot!(autobatch_from(false,None, [doc_imp(ReplaceDocuments, true, None), settings(true)]), @"Some((SettingsAndDocumentOperation { settings_ids: [1], method: ReplaceDocuments, allow_index_creation: true, primary_key: None, operation_ids: [0] }, true))");
        debug_snapshot!(autobatch_from(false,None, [doc_imp(ReplaceDocuments, false, None), settings(true)]), @"Some((DocumentOperation { method: ReplaceDocuments, allow_index_creation: false, primary_key: None, operation_ids: [0] }, false))");

        // batch deletion and addition
        debug_snapshot!(autobatch_from(false, None, [doc_del(), doc_imp(ReplaceDocuments, true, Some("catto"))]), @"Some((DocumentDeletion { deletion_ids: [0] }, false))");
        debug_snapshot!(autobatch_from(false, None, [doc_del(), doc_imp(UpdateDocuments, true, Some("catto"))]), @"Some((DocumentDeletion { deletion_ids: [0] }, false))");
        debug_snapshot!(autobatch_from(false, None, [doc_del(), doc_imp(ReplaceDocuments, true, None)]), @"Some((DocumentDeletion { deletion_ids: [0] }, false))");
        debug_snapshot!(autobatch_from(false, None, [doc_del(), doc_imp(UpdateDocuments, true, None)]), @"Some((DocumentDeletion { deletion_ids: [0] }, false))");
        debug_snapshot!(autobatch_from(false,None, [doc_imp(ReplaceDocuments, false, None), doc_imp(ReplaceDocuments, true, None)]), @"Some((DocumentImport { method: ReplaceDocuments, allow_index_creation: false, primary_key: None, import_ids: [0] }, false))");
        debug_snapshot!(autobatch_from(false,None, [doc_imp(ReplaceDocuments, true, None), doc_imp(ReplaceDocuments, true, None)]), @"Some((DocumentImport { method: ReplaceDocuments, allow_index_creation: true, primary_key: None, import_ids: [0, 1] }, true))");
        debug_snapshot!(autobatch_from(false,None, [doc_imp(ReplaceDocuments, false, None), doc_imp(ReplaceDocuments, false, None)]), @"Some((DocumentImport { method: ReplaceDocuments, allow_index_creation: false, primary_key: None, import_ids: [0, 1] }, false))");
        debug_snapshot!(autobatch_from(false,None, [doc_imp(ReplaceDocuments, true, None), settings(true)]), @"Some((SettingsAndDocumentImport { settings_ids: [1], method: ReplaceDocuments, allow_index_creation: true, primary_key: None, import_ids: [0] }, true))");
        debug_snapshot!(autobatch_from(false,None, [doc_imp(ReplaceDocuments, false, None), settings(true)]), @"Some((DocumentImport { method: ReplaceDocuments, allow_index_creation: false, primary_key: None, import_ids: [0] }, false))");
    }

    #[test]
    fn autobatch_primary_key() {
        // ==> If I have a pk
        // With a single update
        debug_snapshot!(autobatch_from(true, Some("id"), [doc_imp(ReplaceDocuments, true, None)]), @"Some((DocumentOperation { method: ReplaceDocuments, allow_index_creation: true, primary_key: None, operation_ids: [0] }, true))");
        debug_snapshot!(autobatch_from(true, Some("id"), [doc_imp(ReplaceDocuments, true, Some("id"))]), @r###"Some((DocumentOperation { method: ReplaceDocuments, allow_index_creation: true, primary_key: Some("id"), operation_ids: [0] }, true))"###);
        debug_snapshot!(autobatch_from(true, Some("id"), [doc_imp(ReplaceDocuments, true, Some("other"))]), @r###"Some((DocumentOperation { method: ReplaceDocuments, allow_index_creation: true, primary_key: Some("other"), operation_ids: [0] }, true))"###);
        debug_snapshot!(autobatch_from(true, Some("id"), [doc_imp(ReplaceDocuments, true, None)]), @"Some((DocumentImport { method: ReplaceDocuments, allow_index_creation: true, primary_key: None, import_ids: [0] }, true))");
        debug_snapshot!(autobatch_from(true, Some("id"), [doc_imp(ReplaceDocuments, true, Some("id"))]), @r###"Some((DocumentImport { method: ReplaceDocuments, allow_index_creation: true, primary_key: Some("id"), import_ids: [0] }, true))"###);
        debug_snapshot!(autobatch_from(true, Some("id"), [doc_imp(ReplaceDocuments, true, Some("other"))]), @r###"Some((DocumentImport { method: ReplaceDocuments, allow_index_creation: true, primary_key: Some("other"), import_ids: [0] }, true))"###);

        // With multiple updates
        debug_snapshot!(autobatch_from(true, Some("id"), [doc_imp(ReplaceDocuments, true, None), doc_imp(ReplaceDocuments, true, None)]), @"Some((DocumentOperation { method: ReplaceDocuments, allow_index_creation: true, primary_key: None, operation_ids: [0, 1] }, true))");
        debug_snapshot!(autobatch_from(true, Some("id"), [doc_imp(ReplaceDocuments, true, None), doc_imp(ReplaceDocuments, true, Some("id"))]), @r###"Some((DocumentOperation { method: ReplaceDocuments, allow_index_creation: true, primary_key: Some("id"), operation_ids: [0, 1] }, true))"###);
        debug_snapshot!(autobatch_from(true, Some("id"), [doc_imp(ReplaceDocuments, true, None), doc_imp(ReplaceDocuments, true, Some("id")), doc_imp(ReplaceDocuments, true, None)]), @r###"Some((DocumentOperation { method: ReplaceDocuments, allow_index_creation: true, primary_key: Some("id"), operation_ids: [0, 1] }, true))"###);
        debug_snapshot!(autobatch_from(true, Some("id"), [doc_imp(ReplaceDocuments, true, None), doc_imp(ReplaceDocuments, true, Some("other"))]), @"Some((DocumentOperation { method: ReplaceDocuments, allow_index_creation: true, primary_key: None, operation_ids: [0] }, true))");
        debug_snapshot!(autobatch_from(true, Some("id"), [doc_imp(ReplaceDocuments, true, None), doc_imp(ReplaceDocuments, true, Some("other")), doc_imp(ReplaceDocuments, true, None)]), @"Some((DocumentOperation { method: ReplaceDocuments, allow_index_creation: true, primary_key: None, operation_ids: [0] }, true))");
        debug_snapshot!(autobatch_from(true, Some("id"), [doc_imp(ReplaceDocuments, true, None), doc_imp(ReplaceDocuments, true, Some("other")), doc_imp(ReplaceDocuments, true, Some("id"))]), @"Some((DocumentOperation { method: ReplaceDocuments, allow_index_creation: true, primary_key: None, operation_ids: [0] }, true))");
        debug_snapshot!(autobatch_from(true, Some("id"), [doc_imp(ReplaceDocuments, true, None), doc_imp(ReplaceDocuments, true, None)]), @"Some((DocumentImport { method: ReplaceDocuments, allow_index_creation: true, primary_key: None, import_ids: [0, 1] }, true))");
        debug_snapshot!(autobatch_from(true, Some("id"), [doc_imp(ReplaceDocuments, true, None), doc_imp(ReplaceDocuments, true, Some("id"))]), @r###"Some((DocumentImport { method: ReplaceDocuments, allow_index_creation: true, primary_key: Some("id"), import_ids: [0, 1] }, true))"###);
        debug_snapshot!(autobatch_from(true, Some("id"), [doc_imp(ReplaceDocuments, true, None), doc_imp(ReplaceDocuments, true, Some("id")), doc_imp(ReplaceDocuments, true, None)]), @r###"Some((DocumentImport { method: ReplaceDocuments, allow_index_creation: true, primary_key: Some("id"), import_ids: [0, 1] }, true))"###);
        debug_snapshot!(autobatch_from(true, Some("id"), [doc_imp(ReplaceDocuments, true, None), doc_imp(ReplaceDocuments, true, Some("other"))]), @"Some((DocumentImport { method: ReplaceDocuments, allow_index_creation: true, primary_key: None, import_ids: [0] }, true))");
        debug_snapshot!(autobatch_from(true, Some("id"), [doc_imp(ReplaceDocuments, true, None), doc_imp(ReplaceDocuments, true, Some("other")), doc_imp(ReplaceDocuments, true, None)]), @"Some((DocumentImport { method: ReplaceDocuments, allow_index_creation: true, primary_key: None, import_ids: [0] }, true))");
        debug_snapshot!(autobatch_from(true, Some("id"), [doc_imp(ReplaceDocuments, true, None), doc_imp(ReplaceDocuments, true, Some("other")), doc_imp(ReplaceDocuments, true, Some("id"))]), @"Some((DocumentImport { method: ReplaceDocuments, allow_index_creation: true, primary_key: None, import_ids: [0] }, true))");

        debug_snapshot!(autobatch_from(true, Some("id"), [doc_imp(ReplaceDocuments, true, Some("id")), doc_imp(ReplaceDocuments, true, None)]), @r###"Some((DocumentOperation { method: ReplaceDocuments, allow_index_creation: true, primary_key: Some("id"), operation_ids: [0] }, true))"###);
        debug_snapshot!(autobatch_from(true, Some("id"), [doc_imp(ReplaceDocuments, true, Some("id")), doc_imp(ReplaceDocuments, true, Some("id"))]), @r###"Some((DocumentOperation { method: ReplaceDocuments, allow_index_creation: true, primary_key: Some("id"), operation_ids: [0, 1] }, true))"###);
        debug_snapshot!(autobatch_from(true, Some("id"), [doc_imp(ReplaceDocuments, true, Some("id")), doc_imp(ReplaceDocuments, true, Some("id")), doc_imp(ReplaceDocuments, true, None)]), @r###"Some((DocumentOperation { method: ReplaceDocuments, allow_index_creation: true, primary_key: Some("id"), operation_ids: [0, 1] }, true))"###);
        debug_snapshot!(autobatch_from(true, Some("id"), [doc_imp(ReplaceDocuments, true, Some("id")), doc_imp(ReplaceDocuments, true, Some("other"))]), @r###"Some((DocumentOperation { method: ReplaceDocuments, allow_index_creation: true, primary_key: Some("id"), operation_ids: [0] }, true))"###);
        debug_snapshot!(autobatch_from(true, Some("id"), [doc_imp(ReplaceDocuments, true, Some("id")), doc_imp(ReplaceDocuments, true, Some("other")), doc_imp(ReplaceDocuments, true, None)]), @r###"Some((DocumentOperation { method: ReplaceDocuments, allow_index_creation: true, primary_key: Some("id"), operation_ids: [0] }, true))"###);
        debug_snapshot!(autobatch_from(true, Some("id"), [doc_imp(ReplaceDocuments, true, Some("id")), doc_imp(ReplaceDocuments, true, Some("other")), doc_imp(ReplaceDocuments, true, Some("id"))]), @r###"Some((DocumentOperation { method: ReplaceDocuments, allow_index_creation: true, primary_key: Some("id"), operation_ids: [0] }, true))"###);
        debug_snapshot!(autobatch_from(true, Some("id"), [doc_imp(ReplaceDocuments, true, Some("id")), doc_imp(ReplaceDocuments, true, None)]), @r###"Some((DocumentImport { method: ReplaceDocuments, allow_index_creation: true, primary_key: Some("id"), import_ids: [0] }, true))"###);
        debug_snapshot!(autobatch_from(true, Some("id"), [doc_imp(ReplaceDocuments, true, Some("id")), doc_imp(ReplaceDocuments, true, Some("id"))]), @r###"Some((DocumentImport { method: ReplaceDocuments, allow_index_creation: true, primary_key: Some("id"), import_ids: [0, 1] }, true))"###);
        debug_snapshot!(autobatch_from(true, Some("id"), [doc_imp(ReplaceDocuments, true, Some("id")), doc_imp(ReplaceDocuments, true, Some("id")), doc_imp(ReplaceDocuments, true, None)]), @r###"Some((DocumentImport { method: ReplaceDocuments, allow_index_creation: true, primary_key: Some("id"), import_ids: [0, 1] }, true))"###);
        debug_snapshot!(autobatch_from(true, Some("id"), [doc_imp(ReplaceDocuments, true, Some("id")), doc_imp(ReplaceDocuments, true, Some("other"))]), @r###"Some((DocumentImport { method: ReplaceDocuments, allow_index_creation: true, primary_key: Some("id"), import_ids: [0] }, true))"###);
        debug_snapshot!(autobatch_from(true, Some("id"), [doc_imp(ReplaceDocuments, true, Some("id")), doc_imp(ReplaceDocuments, true, Some("other")), doc_imp(ReplaceDocuments, true, None)]), @r###"Some((DocumentImport { method: ReplaceDocuments, allow_index_creation: true, primary_key: Some("id"), import_ids: [0] }, true))"###);
        debug_snapshot!(autobatch_from(true, Some("id"), [doc_imp(ReplaceDocuments, true, Some("id")), doc_imp(ReplaceDocuments, true, Some("other")), doc_imp(ReplaceDocuments, true, Some("id"))]), @r###"Some((DocumentImport { method: ReplaceDocuments, allow_index_creation: true, primary_key: Some("id"), import_ids: [0] }, true))"###);

        debug_snapshot!(autobatch_from(true, Some("id"), [doc_imp(ReplaceDocuments, true, Some("other")), doc_imp(ReplaceDocuments, true, None)]), @r###"Some((DocumentOperation { method: ReplaceDocuments, allow_index_creation: true, primary_key: Some("other"), operation_ids: [0] }, true))"###);
        debug_snapshot!(autobatch_from(true, Some("id"), [doc_imp(ReplaceDocuments, true, Some("other")), doc_imp(ReplaceDocuments, true, Some("id"))]), @r###"Some((DocumentOperation { method: ReplaceDocuments, allow_index_creation: true, primary_key: Some("other"), operation_ids: [0] }, true))"###);
        debug_snapshot!(autobatch_from(true, Some("id"), [doc_imp(ReplaceDocuments, true, Some("other")), doc_imp(ReplaceDocuments, true, Some("id")), doc_imp(ReplaceDocuments, true, None)]), @r###"Some((DocumentOperation { method: ReplaceDocuments, allow_index_creation: true, primary_key: Some("other"), operation_ids: [0] }, true))"###);
        debug_snapshot!(autobatch_from(true, Some("id"), [doc_imp(ReplaceDocuments, true, Some("other")), doc_imp(ReplaceDocuments, true, Some("other"))]), @r###"Some((DocumentOperation { method: ReplaceDocuments, allow_index_creation: true, primary_key: Some("other"), operation_ids: [0] }, true))"###);
        debug_snapshot!(autobatch_from(true, Some("id"), [doc_imp(ReplaceDocuments, true, Some("other")), doc_imp(ReplaceDocuments, true, Some("other")), doc_imp(ReplaceDocuments, true, None)]), @r###"Some((DocumentOperation { method: ReplaceDocuments, allow_index_creation: true, primary_key: Some("other"), operation_ids: [0] }, true))"###);
        debug_snapshot!(autobatch_from(true, Some("id"), [doc_imp(ReplaceDocuments, true, Some("other")), doc_imp(ReplaceDocuments, true, Some("other")), doc_imp(ReplaceDocuments, true, Some("id"))]), @r###"Some((DocumentOperation { method: ReplaceDocuments, allow_index_creation: true, primary_key: Some("other"), operation_ids: [0] }, true))"###);
        debug_snapshot!(autobatch_from(true, Some("id"), [doc_imp(ReplaceDocuments, true, Some("other")), doc_imp(ReplaceDocuments, true, None)]), @r###"Some((DocumentImport { method: ReplaceDocuments, allow_index_creation: true, primary_key: Some("other"), import_ids: [0] }, true))"###);
        debug_snapshot!(autobatch_from(true, Some("id"), [doc_imp(ReplaceDocuments, true, Some("other")), doc_imp(ReplaceDocuments, true, Some("id"))]), @r###"Some((DocumentImport { method: ReplaceDocuments, allow_index_creation: true, primary_key: Some("other"), import_ids: [0] }, true))"###);
        debug_snapshot!(autobatch_from(true, Some("id"), [doc_imp(ReplaceDocuments, true, Some("other")), doc_imp(ReplaceDocuments, true, Some("id")), doc_imp(ReplaceDocuments, true, None)]), @r###"Some((DocumentImport { method: ReplaceDocuments, allow_index_creation: true, primary_key: Some("other"), import_ids: [0] }, true))"###);
        debug_snapshot!(autobatch_from(true, Some("id"), [doc_imp(ReplaceDocuments, true, Some("other")), doc_imp(ReplaceDocuments, true, Some("other"))]), @r###"Some((DocumentImport { method: ReplaceDocuments, allow_index_creation: true, primary_key: Some("other"), import_ids: [0] }, true))"###);
        debug_snapshot!(autobatch_from(true, Some("id"), [doc_imp(ReplaceDocuments, true, Some("other")), doc_imp(ReplaceDocuments, true, Some("other")), doc_imp(ReplaceDocuments, true, None)]), @r###"Some((DocumentImport { method: ReplaceDocuments, allow_index_creation: true, primary_key: Some("other"), import_ids: [0] }, true))"###);
        debug_snapshot!(autobatch_from(true, Some("id"), [doc_imp(ReplaceDocuments, true, Some("other")), doc_imp(ReplaceDocuments, true, Some("other")), doc_imp(ReplaceDocuments, true, Some("id"))]), @r###"Some((DocumentImport { method: ReplaceDocuments, allow_index_creation: true, primary_key: Some("other"), import_ids: [0] }, true))"###);

        // ==> If I don't have a pk
        // With a single update
        debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, true, None)]), @"Some((DocumentOperation { method: ReplaceDocuments, allow_index_creation: true, primary_key: None, operation_ids: [0] }, true))");
        debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, true, Some("id"))]), @r###"Some((DocumentOperation { method: ReplaceDocuments, allow_index_creation: true, primary_key: Some("id"), operation_ids: [0] }, true))"###);
        debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, true, Some("other"))]), @r###"Some((DocumentOperation { method: ReplaceDocuments, allow_index_creation: true, primary_key: Some("other"), operation_ids: [0] }, true))"###);
        debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, true, None)]), @"Some((DocumentImport { method: ReplaceDocuments, allow_index_creation: true, primary_key: None, import_ids: [0] }, true))");
        debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, true, Some("id"))]), @r###"Some((DocumentImport { method: ReplaceDocuments, allow_index_creation: true, primary_key: Some("id"), import_ids: [0] }, true))"###);
        debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, true, Some("other"))]), @r###"Some((DocumentImport { method: ReplaceDocuments, allow_index_creation: true, primary_key: Some("other"), import_ids: [0] }, true))"###);

        // With multiple updates
        debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, true, None), doc_imp(ReplaceDocuments, true, None)]), @"Some((DocumentOperation { method: ReplaceDocuments, allow_index_creation: true, primary_key: None, operation_ids: [0, 1] }, true))");
        debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, true, None), doc_imp(ReplaceDocuments, true, Some("id"))]), @"Some((DocumentOperation { method: ReplaceDocuments, allow_index_creation: true, primary_key: None, operation_ids: [0] }, true))");
        debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, true, Some("id")), doc_imp(ReplaceDocuments, true, None)]), @r###"Some((DocumentOperation { method: ReplaceDocuments, allow_index_creation: true, primary_key: Some("id"), operation_ids: [0] }, true))"###);
        debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, true, None), doc_imp(ReplaceDocuments, true, None)]), @"Some((DocumentImport { method: ReplaceDocuments, allow_index_creation: true, primary_key: None, import_ids: [0, 1] }, true))");
        debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, true, None), doc_imp(ReplaceDocuments, true, Some("id"))]), @"Some((DocumentImport { method: ReplaceDocuments, allow_index_creation: true, primary_key: None, import_ids: [0] }, true))");
        debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, true, Some("id")), doc_imp(ReplaceDocuments, true, None)]), @r###"Some((DocumentImport { method: ReplaceDocuments, allow_index_creation: true, primary_key: Some("id"), import_ids: [0] }, true))"###);
    }
}

@@ -28,7 +28,8 @@ use meilisearch_types::heed::{RoTxn, RwTxn};
use meilisearch_types::milli::documents::{obkv_to_object, DocumentsBatchReader};
use meilisearch_types::milli::heed::CompactionOption;
use meilisearch_types::milli::update::{
    DocumentDeletionResult, IndexDocumentsConfig, IndexDocumentsMethod, Settings as MilliSettings,
    DocumentAdditionResult, DocumentDeletionResult, IndexDocumentsConfig, IndexDocumentsMethod,
    Settings as MilliSettings,
};
use meilisearch_types::milli::{self, BEU32};
use meilisearch_types::settings::{apply_settings_to_builder, Settings, Unchecked};
@@ -85,21 +86,15 @@ pub(crate) enum Batch {
    },
}

#[derive(Debug)]
pub(crate) enum DocumentOperation {
    Add(Uuid),
    Delete(Vec<String>),
}

/// A [batch](Batch) that combines multiple tasks operating on an index.
#[derive(Debug)]
pub(crate) enum IndexOperation {
    DocumentOperation {
    DocumentImport {
        index_uid: String,
        primary_key: Option<String>,
        method: IndexDocumentsMethod,
        documents_counts: Vec<u64>,
        operations: Vec<DocumentOperation>,
        content_files: Vec<Uuid>,
        tasks: Vec<Task>,
    },
    DocumentDeletion {
@@ -126,13 +121,13 @@ pub(crate) enum IndexOperation {
        settings: Vec<(bool, Settings<Unchecked>)>,
        settings_tasks: Vec<Task>,
    },
    SettingsAndDocumentOperation {
    SettingsAndDocumentImport {
        index_uid: String,

        primary_key: Option<String>,
        method: IndexDocumentsMethod,
        documents_counts: Vec<u64>,
        operations: Vec<DocumentOperation>,
        content_files: Vec<Uuid>,
        document_import_tasks: Vec<Task>,

        // The boolean indicates if it's a settings deletion or creation.
@@ -154,13 +149,13 @@ impl Batch {
                tasks.iter().map(|task| task.uid).collect()
            }
            Batch::IndexOperation { op, .. } => match op {
                IndexOperation::DocumentOperation { tasks, .. }
                IndexOperation::DocumentImport { tasks, .. }
                | IndexOperation::DocumentDeletion { tasks, .. }
                | IndexOperation::Settings { tasks, .. }
                | IndexOperation::DocumentClear { tasks, .. } => {
                    tasks.iter().map(|task| task.uid).collect()
                }
                IndexOperation::SettingsAndDocumentOperation {
                IndexOperation::SettingsAndDocumentImport {
                    document_import_tasks: tasks,
                    settings_tasks: other,
                    ..
@@ -174,33 +169,17 @@ impl Batch {
            Batch::IndexSwap { task } => vec![task.uid],
        }
    }

    /// Return the index UID associated with this batch
    pub fn index_uid(&self) -> Option<&str> {
        use Batch::*;
        match self {
            TaskCancelation { .. }
            | TaskDeletion(_)
            | SnapshotCreation(_)
            | Dump(_)
            | IndexSwap { .. } => None,
            IndexOperation { op, .. } => Some(op.index_uid()),
            IndexCreation { index_uid, .. }
            | IndexUpdate { index_uid, .. }
            | IndexDeletion { index_uid, .. } => Some(index_uid),
        }
    }
}

impl IndexOperation {
    pub fn index_uid(&self) -> &str {
        match self {
            IndexOperation::DocumentOperation { index_uid, .. }
            IndexOperation::DocumentImport { index_uid, .. }
            | IndexOperation::DocumentDeletion { index_uid, .. }
            | IndexOperation::DocumentClear { index_uid, .. }
            | IndexOperation::Settings { index_uid, .. }
            | IndexOperation::DocumentClearAndSetting { index_uid, .. }
            | IndexOperation::SettingsAndDocumentOperation { index_uid, .. } => index_uid,
            | IndexOperation::SettingsAndDocumentImport { index_uid, .. } => index_uid,
        }
    }
}
@@ -227,22 +206,17 @@ impl IndexScheduler {
|
||||
},
|
||||
must_create_index,
|
||||
})),
|
||||
BatchKind::DocumentOperation { method, operation_ids, .. } => {
|
||||
let tasks = self.get_existing_tasks(rtxn, operation_ids)?;
|
||||
let primary_key = tasks
|
||||
.iter()
|
||||
.find_map(|task| match task.kind {
|
||||
KindWithContent::DocumentAdditionOrUpdate { ref primary_key, .. } => {
|
||||
// we want to stop on the first document addition
|
||||
Some(primary_key.clone())
|
||||
}
|
||||
KindWithContent::DocumentDeletion { .. } => None,
|
||||
_ => unreachable!(),
|
||||
})
|
||||
.flatten();
|
||||
BatchKind::DocumentImport { method, import_ids, .. } => {
|
||||
let tasks = self.get_existing_tasks(rtxn, import_ids)?;
|
||||
let primary_key = match &tasks[0].kind {
|
||||
KindWithContent::DocumentAdditionOrUpdate { primary_key, .. } => {
|
||||
primary_key.clone()
|
||||
}
|
||||
_ => unreachable!(),
|
||||
};
|
||||
|
||||
let mut documents_counts = Vec::new();
|
||||
let mut operations = Vec::new();
|
||||
let mut content_files = Vec::new();
|
||||
|
||||
for task in tasks.iter() {
|
||||
match task.kind {
|
||||
@@ -252,23 +226,19 @@ impl IndexScheduler {
|
||||
..
|
||||
} => {
|
||||
documents_counts.push(documents_count);
|
||||
operations.push(DocumentOperation::Add(content_file));
|
||||
}
|
||||
KindWithContent::DocumentDeletion { ref documents_ids, .. } => {
|
||||
documents_counts.push(documents_ids.len() as u64);
|
||||
operations.push(DocumentOperation::Delete(documents_ids.clone()));
|
||||
content_files.push(content_file);
|
||||
}
|
||||
_ => unreachable!(),
|
||||
}
|
||||
}
|
||||
|
||||
Ok(Some(Batch::IndexOperation {
|
||||
op: IndexOperation::DocumentOperation {
|
||||
op: IndexOperation::DocumentImport {
|
||||
index_uid,
|
||||
primary_key,
|
||||
method,
|
||||
documents_counts,
|
||||
operations,
|
||||
content_files,
|
||||
tasks,
|
||||
},
|
||||
must_create_index,
|
||||
@@ -352,12 +322,12 @@ impl IndexScheduler {
|
||||
must_create_index,
|
||||
}))
|
||||
}
|
||||
BatchKind::SettingsAndDocumentOperation {
|
||||
BatchKind::SettingsAndDocumentImport {
|
||||
settings_ids,
|
||||
method,
|
||||
allow_index_creation,
|
||||
primary_key,
|
||||
operation_ids,
|
||||
import_ids,
|
||||
} => {
|
||||
let settings = self.create_next_batch_index(
|
||||
rtxn,
|
||||
@@ -369,11 +339,11 @@ impl IndexScheduler {
|
||||
let document_import = self.create_next_batch_index(
|
||||
rtxn,
|
||||
index_uid.clone(),
|
||||
BatchKind::DocumentOperation {
|
||||
BatchKind::DocumentImport {
|
||||
method,
|
||||
allow_index_creation,
|
||||
primary_key,
|
||||
operation_ids,
|
||||
import_ids,
|
||||
},
|
||||
must_create_index,
|
||||
)?;
|
||||
@@ -382,10 +352,10 @@ impl IndexScheduler {
|
||||
(
|
||||
Some(Batch::IndexOperation {
|
||||
op:
|
||||
IndexOperation::DocumentOperation {
|
||||
IndexOperation::DocumentImport {
|
||||
primary_key,
|
||||
documents_counts,
|
||||
operations,
|
||||
content_files,
|
||||
tasks: document_import_tasks,
|
||||
..
|
||||
},
|
||||
@@ -396,12 +366,12 @@ impl IndexScheduler {
|
||||
..
|
||||
}),
|
||||
) => Ok(Some(Batch::IndexOperation {
|
||||
op: IndexOperation::SettingsAndDocumentOperation {
|
||||
op: IndexOperation::SettingsAndDocumentImport {
|
||||
index_uid,
|
||||
primary_key,
|
||||
method,
|
||||
documents_counts,
|
||||
operations,
|
||||
content_files,
|
||||
document_import_tasks,
|
||||
settings,
|
||||
settings_tasks,
|
||||
@@ -675,6 +645,9 @@ impl IndexScheduler {
|
||||
}
|
||||
|
||||
// 3. Snapshot every indexes
|
||||
// TODO we are opening all of the indexes it can be too much we should unload all
|
||||
// of the indexes we are trying to open. It would be even better to only unload
|
||||
// the ones that were opened by us. Or maybe use a LRU in the index mapper.
|
||||
for result in self.index_mapper.index_mapping.iter(&rtxn)? {
|
||||
let (name, uuid) = result?;
|
||||
let index = self.index_mapper.index(&rtxn, name)?;
|
||||
@@ -711,14 +684,6 @@ impl IndexScheduler {
|
||||
// 5.3 Change the permission to make the snapshot readonly
|
||||
let mut permissions = file.metadata()?.permissions();
|
||||
permissions.set_readonly(true);
|
||||
#[cfg(unix)]
|
||||
{
|
||||
use std::os::unix::fs::PermissionsExt;
|
||||
#[allow(clippy::non_octal_unix_permissions)]
|
||||
// rwxrwxrwx
|
||||
permissions.set_mode(0b100100100);
|
||||
}
|
||||
|
||||
file.set_permissions(permissions)?;
|
||||
|
||||
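For reference, the permission bits in the removed block: 0b100100100 equals 0o444, i.e. r--r--r-- (read-only for owner, group, and others), which is consistent with the set_readonly(true) call just before it. A tiny standalone check:

// Standalone check of the binary literal used above: it is the classic
// read-only 0o444 mode, not rwxrwxrwx (0o777).
fn main() {
    assert_eq!(0b100100100, 0o444); // r--r--r--
    assert_ne!(0b100100100, 0o777); // rwxrwxrwx would be 0b111111111
}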
        for task in &mut tasks {

@@ -793,15 +758,15 @@ impl IndexScheduler {
        dump_tasks.flush()?;

        // 3. Dump the indexes
        self.index_mapper.try_for_each_index(&rtxn, |uid, index| -> Result<()> {
        for (uid, index) in self.index_mapper.indexes(&rtxn)? {
            let rtxn = index.read_txn()?;
            let metadata = IndexMetadata {
                uid: uid.to_owned(),
                uid: uid.clone(),
                primary_key: index.primary_key(&rtxn)?.map(String::from),
                created_at: index.created_at(&rtxn)?,
                updated_at: index.updated_at(&rtxn)?,
            };
            let mut index_dumper = dump.create_index(uid, &metadata)?;
            let mut index_dumper = dump.create_index(&uid, &metadata)?;

            let fields_ids_map = index.fields_ids_map(&rtxn)?;
            let all_fields: Vec<_> = fields_ids_map.iter().map(|(id, _)| id).collect();

@@ -814,10 +779,9 @@ impl IndexScheduler {
            }

            // 3.2. Dump the settings
            let settings = meilisearch_types::settings::settings(index, &rtxn)?;
            let settings = meilisearch_types::settings::settings(&index, &rtxn)?;
            index_dumper.settings(&settings)?;
            Ok(())
        })?;
        }

        let dump_uid = started_at.format(format_description!(
            "[year repr:full][month repr:numerical][day padding:zero]-[hour padding:zero][minute padding:zero][second padding:zero][subsecond digits:3]"

@@ -833,38 +797,20 @@ impl IndexScheduler {
            Ok(vec![task])
        }
        Batch::IndexOperation { op, must_create_index } => {
            let index_uid = op.index_uid().to_string();
            let index_uid = op.index_uid();
            let index = if must_create_index {
                // create the index if it doesn't already exist
                let wtxn = self.env.write_txn()?;
                self.index_mapper.create_index(wtxn, &index_uid, None)?
                self.index_mapper.create_index(wtxn, index_uid, None)?
            } else {
                let rtxn = self.env.read_txn()?;
                self.index_mapper.index(&rtxn, &index_uid)?
                self.index_mapper.index(&rtxn, index_uid)?
            };

            let mut index_wtxn = index.write_txn()?;
            let tasks = self.apply_index_operation(&mut index_wtxn, &index, op)?;
            index_wtxn.commit()?;

            // if the update processed successfully, we're going to store the new
            // stats of the index. Since the tasks have already been processed and
            // this is a non-critical operation, if it fails, we should not fail
            // the entire batch.
            let res = || -> Result<()> {
                let index_rtxn = index.read_txn()?;
                let stats = crate::index_mapper::IndexStats::new(&index, &index_rtxn)?;
                let mut wtxn = self.env.write_txn()?;
                self.index_mapper.store_stats_of(&mut wtxn, &index_uid, &stats)?;
                wtxn.commit()?;
                Ok(())
            }();

            match res {
                Ok(_) => (),
                Err(e) => error!("Could not write the stats of the index {}", e),
            }

            Ok(tasks)
        }
        Batch::IndexCreation { index_uid, primary_key, task } => {

@@ -895,31 +841,9 @@ impl IndexScheduler {
            )?;
            index_wtxn.commit()?;
        }

        // drop rtxn before starting a new wtxn on the same db
        rtxn.commit()?;

        task.status = Status::Succeeded;
        task.details = Some(Details::IndexInfo { primary_key });

        // if the update processed successfully, we're going to store the new
        // stats of the index. Since the tasks have already been processed and
        // this is a non-critical operation, if it fails, we should not fail
        // the entire batch.
        let res = || -> Result<()> {
            let mut wtxn = self.env.write_txn()?;
            let index_rtxn = index.read_txn()?;
            let stats = crate::index_mapper::IndexStats::new(&index, &index_rtxn)?;
            self.index_mapper.store_stats_of(&mut wtxn, &index_uid, &stats)?;
            wtxn.commit()?;
            Ok(())
        }();

        match res {
            Ok(_) => (),
            Err(e) => error!("Could not write the stats of the index {}", e),
        }

        Ok(vec![task])
        }
        Batch::IndexDeletion { index_uid, index_has_been_created, mut tasks } => {

@@ -1063,12 +987,12 @@ impl IndexScheduler {

            Ok(tasks)
        }
        IndexOperation::DocumentOperation {
        IndexOperation::DocumentImport {
            index_uid: _,
            primary_key,
            method,
            documents_counts: _,
            operations,
            documents_counts,
            content_files,
            mut tasks,
        } => {
            let mut primary_key_has_been_set = false;

@@ -1113,82 +1037,26 @@ impl IndexScheduler {
            || must_stop_processing.get(),
        )?;

        for (operation, task) in operations.into_iter().zip(tasks.iter_mut()) {
            match operation {
                DocumentOperation::Add(content_uuid) => {
                    let content_file = self.file_store.get_update(content_uuid)?;
                    let reader = DocumentsBatchReader::from_reader(content_file)
                        .map_err(milli::Error::from)?;
                    let (new_builder, user_result) = builder.add_documents(reader)?;
                    builder = new_builder;
        let mut results = Vec::new();
        for content_uuid in content_files.into_iter() {
            let content_file = self.file_store.get_update(content_uuid)?;
            let reader = DocumentsBatchReader::from_reader(content_file)
                .map_err(milli::Error::from)?;
            let (new_builder, user_result) = builder.add_documents(reader)?;
            builder = new_builder;

                    let received_documents =
                        if let Some(Details::DocumentAdditionOrUpdate {
                            received_documents,
                            ..
                        }) = task.details
                        {
                            received_documents
                        } else {
                            // In the case of a `documentAdditionOrUpdate` the details MUST be set
                            unreachable!();
                        };
            let user_result = match user_result {
                Ok(count) => Ok(DocumentAdditionResult {
                    indexed_documents: count,
                    number_of_documents: count, // TODO: this is wrong, we should use the value stored in the Details.
                }),
                Err(e) => Err(milli::Error::from(e)),
            };

                    match user_result {
                        Ok(count) => {
                            task.status = Status::Succeeded;
                            task.details = Some(Details::DocumentAdditionOrUpdate {
                                received_documents,
                                indexed_documents: Some(count),
                            })
                        }
                        Err(e) => {
                            task.status = Status::Failed;
                            task.details = Some(Details::DocumentAdditionOrUpdate {
                                received_documents,
                                indexed_documents: Some(0),
                            });
                            task.error = Some(milli::Error::from(e).into());
                        }
                    }
                }
                DocumentOperation::Delete(document_ids) => {
                    let (new_builder, user_result) =
                        builder.remove_documents(document_ids)?;
                    builder = new_builder;

                    let provided_ids =
                        if let Some(Details::DocumentDeletion { provided_ids, .. }) =
                            task.details
                        {
                            provided_ids
                        } else {
                            // In the case of a `documentDeletion` the details MUST be set
                            unreachable!();
                        };

                    match user_result {
                        Ok(count) => {
                            task.status = Status::Succeeded;
                            task.details = Some(Details::DocumentDeletion {
                                provided_ids,
                                deleted_documents: Some(count),
                            });
                        }
                        Err(e) => {
                            task.status = Status::Failed;
                            task.details = Some(Details::DocumentDeletion {
                                provided_ids,
                                deleted_documents: Some(0),
                            });
                            task.error = Some(milli::Error::from(e).into());
                        }
                    }
                }
            }
            results.push(user_result);
        }

        if !tasks.iter().all(|res| res.error.is_some()) {
        if results.iter().any(|res| res.is_ok()) {
            let addition = builder.execute()?;
            info!("document addition done: {:?}", addition);
        } else if primary_key_has_been_set {

@@ -1203,6 +1071,29 @@ impl IndexScheduler {
            )?;
        }

        for (task, (ret, count)) in
            tasks.iter_mut().zip(results.into_iter().zip(documents_counts))
        {
            match ret {
                Ok(DocumentAdditionResult { indexed_documents, number_of_documents }) => {
                    task.status = Status::Succeeded;
                    task.details = Some(Details::DocumentAdditionOrUpdate {
                        received_documents: number_of_documents,
                        indexed_documents: Some(indexed_documents),
                    });
                }
                Err(error) => {
                    task.status = Status::Failed;
                    task.details = Some(Details::DocumentAdditionOrUpdate {
                        received_documents: count,
                        // if there was an error we indexed 0 documents.
                        indexed_documents: Some(0),
                    });
                    task.error = Some(error.into())
                }
            }
        }

        Ok(tasks)
        }
        IndexOperation::DocumentDeletion { index_uid: _, documents, mut tasks } => {

@@ -1245,12 +1136,12 @@ impl IndexScheduler {

            Ok(tasks)
        }
        IndexOperation::SettingsAndDocumentOperation {
        IndexOperation::SettingsAndDocumentImport {
            index_uid,
            primary_key,
            method,
            documents_counts,
            operations,
            content_files,
            document_import_tasks,
            settings,
            settings_tasks,

@@ -1268,12 +1159,12 @@ impl IndexScheduler {
        let mut import_tasks = self.apply_index_operation(
            index_wtxn,
            index,
            IndexOperation::DocumentOperation {
            IndexOperation::DocumentImport {
                index_uid,
                primary_key,
                method,
                documents_counts,
                operations,
                content_files,
                tasks: document_import_tasks,
            },
        )?;

250
index-scheduler/src/index_mapper.rs
Normal file
@@ -0,0 +1,250 @@
use std::collections::hash_map::Entry;
use std::collections::HashMap;
use std::path::{Path, PathBuf};
use std::sync::{Arc, RwLock};
use std::{fs, thread};

use log::error;
use meilisearch_types::heed::types::Str;
use meilisearch_types::heed::{Database, Env, EnvOpenOptions, RoTxn, RwTxn};
use meilisearch_types::milli::update::IndexerConfig;
use meilisearch_types::milli::Index;
use time::OffsetDateTime;
use uuid::Uuid;

use self::IndexStatus::{Available, BeingDeleted};
use crate::uuid_codec::UuidCodec;
use crate::{clamp_to_page_size, Error, Result};

const INDEX_MAPPING: &str = "index-mapping";

/// Structure managing meilisearch's indexes.
///
/// It is responsible for:
/// 1. Creating new indexes
/// 2. Opening indexes and storing references to these opened indexes
/// 3. Accessing indexes through their uuid
/// 4. Mapping a user-defined name to each index uuid.
#[derive(Clone)]
pub struct IndexMapper {
    /// Keep track of the opened indexes. Used mainly by the index resolver.
    index_map: Arc<RwLock<HashMap<Uuid, IndexStatus>>>,

    /// Map an index name with an index uuid currently available on disk.
    pub(crate) index_mapping: Database<Str, UuidCodec>,

    /// Path to the folder where the LMDB environments of each index are.
    base_path: PathBuf,
    index_size: usize,
    pub indexer_config: Arc<IndexerConfig>,
}
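Resolving an index in this structure takes two hops: the persistent index_mapping database maps a user-facing name to a Uuid, and the in-memory index_map caches the opened Index behind that Uuid. A hedged sketch of that lookup flow, using plain HashMaps instead of the heed Database and simplified stand-in types:

// Hedged sketch of the two-hop name -> uuid -> Index resolution; `u128`
// stands in for `Uuid` and `Status` for the real `IndexStatus`.
use std::collections::HashMap;

#[derive(Clone)]
enum Status<I> {
    BeingDeleted,
    Available(I),
}

fn resolve<I: Clone>(
    mapping: &HashMap<String, u128>,  // name -> uuid (persistent in the real code)
    map: &HashMap<u128, Status<I>>,   // uuid -> opened index (in-memory)
    name: &str,
) -> Result<I, String> {
    let uuid = mapping.get(name).ok_or_else(|| format!("index `{name}` not found"))?;
    match map.get(uuid) {
        Some(Status::Available(index)) => Ok(index.clone()),
        // being deleted or not yet opened; the real code opens it lazily here
        _ => Err(format!("index `{name}` not available")),
    }
}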
/// Whether the index is available for use or is forbidden to be inserted back in the index map
#[allow(clippy::large_enum_variant)]
#[derive(Clone)]
pub enum IndexStatus {
    /// Do not insert it back in the index map as it is currently being deleted.
    BeingDeleted,
    /// You can use the index without worrying about anything.
    Available(Index),
}

impl IndexMapper {
    pub fn new(
        env: &Env,
        base_path: PathBuf,
        index_size: usize,
        indexer_config: IndexerConfig,
    ) -> Result<Self> {
        Ok(Self {
            index_map: Arc::default(),
            index_mapping: env.create_database(Some(INDEX_MAPPING))?,
            base_path,
            index_size,
            indexer_config: Arc::new(indexer_config),
        })
    }

    /// Create or open an index in the specified path.
    /// The path *must* exist or an error will be thrown.
    fn create_or_open_index(
        &self,
        path: &Path,
        date: Option<(OffsetDateTime, OffsetDateTime)>,
    ) -> Result<Index> {
        let mut options = EnvOpenOptions::new();
        options.map_size(clamp_to_page_size(self.index_size));
        options.max_readers(1024);

        if let Some((created, updated)) = date {
            Ok(Index::new_with_creation_dates(options, path, created, updated)?)
        } else {
            Ok(Index::new(options, path)?)
        }
    }

    /// Get or create the index.
    pub fn create_index(
        &self,
        mut wtxn: RwTxn,
        name: &str,
        date: Option<(OffsetDateTime, OffsetDateTime)>,
    ) -> Result<Index> {
        match self.index(&wtxn, name) {
            Ok(index) => {
                wtxn.commit()?;
                Ok(index)
            }
            Err(Error::IndexNotFound(_)) => {
                let uuid = Uuid::new_v4();
                self.index_mapping.put(&mut wtxn, name, &uuid)?;

                let index_path = self.base_path.join(uuid.to_string());
                fs::create_dir_all(&index_path)?;

                let index = self.create_or_open_index(&index_path, date)?;

                wtxn.commit()?;
                // TODO: it would be better to lazily create the index. But we need an Index::open function for milli.
                if let Some(BeingDeleted) =
                    self.index_map.write().unwrap().insert(uuid, Available(index.clone()))
                {
                    panic!("Uuid v4 conflict.");
                }

                Ok(index)
            }
            error => error,
        }
    }

    /// Removes the index from the mapping table and the in-memory index map
    /// but keeps the associated tasks.
    pub fn delete_index(&self, mut wtxn: RwTxn, name: &str) -> Result<()> {
        let uuid = self
            .index_mapping
            .get(&wtxn, name)?
            .ok_or_else(|| Error::IndexNotFound(name.to_string()))?;

        // Once we retrieved the UUID of the index we remove it from the mapping table.
        assert!(self.index_mapping.delete(&mut wtxn, name)?);

        wtxn.commit()?;
        // We remove the index from the in-memory index map.
        let mut lock = self.index_map.write().unwrap();
        let closing_event = match lock.insert(uuid, BeingDeleted) {
            Some(Available(index)) => Some(index.prepare_for_closing()),
            _ => None,
        };

        drop(lock);

        let index_map = self.index_map.clone();
        let index_path = self.base_path.join(uuid.to_string());
        let index_name = name.to_string();
        thread::Builder::new()
            .name(String::from("index_deleter"))
            .spawn(move || {
                // We first wait to be sure that the previously opened index is effectively closed.
                // This can take a lot of time, this is why we do that in a separate thread.
                if let Some(closing_event) = closing_event {
                    closing_event.wait();
                }

                // Then we remove the content from disk.
                if let Err(e) = fs::remove_dir_all(&index_path) {
                    error!(
                        "An error happened when deleting the index {} ({}): {}",
                        index_name, uuid, e
                    );
                }

                // Finally we remove the entry from the index map.
                assert!(matches!(index_map.write().unwrap().remove(&uuid), Some(BeingDeleted)));
            })
            .unwrap();

        Ok(())
    }
    pub fn exists(&self, rtxn: &RoTxn, name: &str) -> Result<bool> {
        Ok(self.index_mapping.get(rtxn, name)?.is_some())
    }

    /// Return an index, may open it if it wasn't already opened.
    pub fn index(&self, rtxn: &RoTxn, name: &str) -> Result<Index> {
        let uuid = self
            .index_mapping
            .get(rtxn, name)?
            .ok_or_else(|| Error::IndexNotFound(name.to_string()))?;

        // we clone here to drop the lock before entering the match
        let index = self.index_map.read().unwrap().get(&uuid).cloned();
        let index = match index {
            Some(Available(index)) => index,
            Some(BeingDeleted) => return Err(Error::IndexNotFound(name.to_string())),
            // since we're lazy, it's possible that the index has not been opened yet.
            None => {
                let mut index_map = self.index_map.write().unwrap();
                // between the read lock and the write lock it's not impossible
                // that someone already opened the index (eg if two searches happen
                // at the same time), thus before opening it we check a second time
                // if it's not already there.
                // Since there is a good chance it's not already there we can use
                // the entry method.
                match index_map.entry(uuid) {
                    Entry::Vacant(entry) => {
                        let index_path = self.base_path.join(uuid.to_string());

                        let index = self.create_or_open_index(&index_path, None)?;
                        entry.insert(Available(index.clone()));
                        index
                    }
                    Entry::Occupied(entry) => match entry.get() {
                        Available(index) => index.clone(),
                        BeingDeleted => return Err(Error::IndexNotFound(name.to_string())),
                    },
                }
            }
        };

        Ok(index)
    }

    /// Return all indexes, may open them if they weren't already opened.
    pub fn indexes(&self, rtxn: &RoTxn) -> Result<Vec<(String, Index)>> {
        self.index_mapping
            .iter(rtxn)?
            .map(|ret| {
                ret.map_err(Error::from).and_then(|(name, _)| {
                    self.index(rtxn, name).map(|index| (name.to_string(), index))
                })
            })
            .collect()
    }

    /// Swap two index names.
    pub fn swap(&self, wtxn: &mut RwTxn, lhs: &str, rhs: &str) -> Result<()> {
        let lhs_uuid = self
            .index_mapping
            .get(wtxn, lhs)?
            .ok_or_else(|| Error::IndexNotFound(lhs.to_string()))?;
        let rhs_uuid = self
            .index_mapping
            .get(wtxn, rhs)?
            .ok_or_else(|| Error::IndexNotFound(rhs.to_string()))?;

        self.index_mapping.put(wtxn, lhs, &rhs_uuid)?;
        self.index_mapping.put(wtxn, rhs, &lhs_uuid)?;

        Ok(())
    }

    pub fn index_exists(&self, rtxn: &RoTxn, name: &str) -> Result<bool> {
        Ok(self.index_mapping.get(rtxn, name)?.is_some())
    }

    pub fn indexer_config(&self) -> &IndexerConfig {
        &self.indexer_config
    }
}
@@ -1,370 +0,0 @@
/// The map size to use when we don't succeed in reading it from the indexes.
const DEFAULT_MAP_SIZE: usize = 10 * 1024 * 1024 * 1024; // 10 GiB

use std::collections::BTreeMap;
use std::path::Path;
use std::time::Duration;

use meilisearch_types::heed::{EnvClosingEvent, EnvOpenOptions};
use meilisearch_types::milli::Index;
use time::OffsetDateTime;
use uuid::Uuid;

use super::IndexStatus::{self, Available, BeingDeleted, Closing, Missing};
use crate::lru::{InsertionOutcome, LruMap};
use crate::{clamp_to_page_size, Result};

/// Keep an internally consistent view of the open indexes in memory.
///
/// This view is made of an LRU cache that will evict the least frequently used indexes when new indexes are opened.
/// Indexes that are being closed (for resizing or due to cache eviction) or deleted cannot be evicted from the cache and
/// are stored separately.
///
/// This view provides operations to change the state of the index as it is known in memory:
/// open an index (making it available for queries), close an index (specifying the new size it should be opened with),
/// delete an index.
///
/// External consistency with the other bits of data of an index is provided by the `IndexMapper` parent structure.
pub struct IndexMap {
    /// An LRU map of indexes that are in the open state and available for queries.
    available: LruMap<Uuid, Index>,
    /// A map of indexes that are not available for queries, either because they are being deleted
    /// or because they are being closed.
    ///
    /// If they are being deleted, the UUID points to `None`.
    unavailable: BTreeMap<Uuid, Option<ClosingIndex>>,

    /// A monotonically increasing generation number, used to differentiate between multiple successive index closing requests.
    ///
    /// Because multiple readers could be waiting on an index to close, the following could theoretically happen:
    ///
    /// 1. Multiple readers wait for the index closing to occur.
    /// 2. One of them "wins the race", takes the lock and then removes the index that finished closing from the map.
    /// 3. The index is reopened, but must be closed again (such as being resized again).
    /// 4. One reader that "lost the race" in (2) wakes up and tries to take the lock and remove the index from the map.
    ///
    /// In that situation, the index may or may not have finished closing. The `generation` field allows to remember which
    /// closing request was made, so the reader that "lost the race" has the old generation and will need to wait again for the index
    /// to close.
    generation: usize,
}
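The generation counter's whole job, per the doc comment above, is to let a late waker detect that its closing request is stale. A compact hedged sketch of that guard, with simplified types instead of the real IndexMap (the actual logic lives in ReopenableIndex::close and reopen below):

// Hedged sketch of the generation guard: a waiter may only remove the entry
// it was actually waiting on; a newer generation means a newer closing
// request superseded it, so the waiter must back off and wait again.
use std::collections::BTreeMap;

struct Closing {
    generation: usize,
}

fn finish_close(unavailable: &mut BTreeMap<u128, Closing>, uuid: u128, my_generation: usize) {
    if let Some(closing) = unavailable.get(&uuid) {
        if closing.generation != my_generation {
            // A newer closing request exists: this waiter lost the race.
            return;
        }
        unavailable.remove(&uuid);
    }
}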
#[derive(Clone)]
pub struct ClosingIndex {
    uuid: Uuid,
    closing_event: EnvClosingEvent,
    map_size: usize,
    generation: usize,
}

impl ClosingIndex {
    /// Waits for the index to be definitely closed.
    ///
    /// To avoid blocking, users should relinquish their locks to the IndexMap before calling this function.
    ///
    /// After the index is physically closed, the in memory map must still be updated to take this into account.
    /// To do so, a `ReopenableIndex` is returned, that can be used to either definitely close or definitely open
    /// the index without waiting anymore.
    pub fn wait_timeout(self, timeout: Duration) -> Option<ReopenableIndex> {
        self.closing_event.wait_timeout(timeout).then_some(ReopenableIndex {
            uuid: self.uuid,
            map_size: self.map_size,
            generation: self.generation,
        })
    }
}

pub struct ReopenableIndex {
    uuid: Uuid,
    map_size: usize,
    generation: usize,
}

impl ReopenableIndex {
    /// Attempts to reopen the index, which can result in the index being reopened again or not
    /// (e.g. if another thread already opened and closed the index again).
    ///
    /// Use get again on the IndexMap to get the updated status.
    ///
    /// Fails if the underlying index creation fails.
    ///
    /// # Status table
    ///
    /// | Previous Status | New Status                                   |
    /// |-----------------|----------------------------------------------|
    /// | Missing         | Missing                                      |
    /// | BeingDeleted    | BeingDeleted                                 |
    /// | Closing         | Available or Closing depending on generation |
    /// | Available       | Available                                    |
    ///
    pub fn reopen(self, map: &mut IndexMap, path: &Path) -> Result<()> {
        if let Closing(reopen) = map.get(&self.uuid) {
            if reopen.generation != self.generation {
                return Ok(());
            }
            map.unavailable.remove(&self.uuid);
            map.create(&self.uuid, path, None, self.map_size)?;
        }
        Ok(())
    }

    /// Attempts to close the index, which may or may not result in the index being closed
    /// (e.g. if another thread already reopened the index again).
    ///
    /// Use get again on the IndexMap to get the updated status.
    ///
    /// # Status table
    ///
    /// | Previous Status | New Status                                 |
    /// |-----------------|--------------------------------------------|
    /// | Missing         | Missing                                    |
    /// | BeingDeleted    | BeingDeleted                               |
    /// | Closing         | Missing or Closing depending on generation |
    /// | Available       | Available                                  |
    pub fn close(self, map: &mut IndexMap) {
        if let Closing(reopen) = map.get(&self.uuid) {
            if reopen.generation != self.generation {
                return;
            }
            map.unavailable.remove(&self.uuid);
        }
    }
}

impl IndexMap {
    pub fn new(cap: usize) -> IndexMap {
        Self { unavailable: Default::default(), available: LruMap::new(cap), generation: 0 }
    }

    /// Gets the current status of an index in the map.
    ///
    /// If the index is available it can be accessed from the returned status.
    pub fn get(&self, uuid: &Uuid) -> IndexStatus {
        self.available
            .get(uuid)
            .map(|index| Available(index.clone()))
            .unwrap_or_else(|| self.get_unavailable(uuid))
    }

    fn get_unavailable(&self, uuid: &Uuid) -> IndexStatus {
        match self.unavailable.get(uuid) {
            Some(Some(reopen)) => Closing(reopen.clone()),
            Some(None) => BeingDeleted,
            None => Missing,
        }
    }

    /// Attempts to create a new index that wasn't existing before.
    ///
    /// # Status table
    ///
    /// | Previous Status | New Status |
    /// |-----------------|------------|
    /// | Missing         | Available  |
    /// | BeingDeleted    | panics     |
    /// | Closing         | panics     |
    /// | Available       | panics     |
    ///
    pub fn create(
        &mut self,
        uuid: &Uuid,
        path: &Path,
        date: Option<(OffsetDateTime, OffsetDateTime)>,
        map_size: usize,
    ) -> Result<Index> {
        if !matches!(self.get_unavailable(uuid), Missing) {
            panic!("Attempt to open an index that was unavailable");
        }
        let index = create_or_open_index(path, date, map_size)?;
        match self.available.insert(*uuid, index.clone()) {
            InsertionOutcome::InsertedNew => (),
            InsertionOutcome::Evicted(evicted_uuid, evicted_index) => {
                self.close(evicted_uuid, evicted_index, 0);
            }
            InsertionOutcome::Replaced(_) => {
                panic!("Attempt to open an index that was already opened")
            }
        }
        Ok(index)
    }

    /// Increases the current generation. See documentation for this field.
    ///
    /// In the unlikely event that the 2^64 generations would have been exhausted, we simply wrap around.
    ///
    /// For this to cause an issue, one should be able to stop a reader in time after it got a `ReopenableIndex` and before it takes the lock
    /// to remove it from the unavailable map, and keep the reader in this frozen state for 2^64 closings of other indexes.
    ///
    /// This seems overwhelmingly impossible to achieve in practice.
    fn next_generation(&mut self) -> usize {
        self.generation = self.generation.wrapping_add(1);
        self.generation
    }

    /// Attempts to close an index.
    ///
    /// # Status table
    ///
    /// | Previous Status | New Status   |
    /// |-----------------|--------------|
    /// | Missing         | Missing      |
    /// | BeingDeleted    | BeingDeleted |
    /// | Closing         | Closing      |
    /// | Available       | Closing      |
    ///
    pub fn close_for_resize(&mut self, uuid: &Uuid, map_size_growth: usize) {
        let Some(index) = self.available.remove(uuid) else { return; };
        self.close(*uuid, index, map_size_growth);
    }

    fn close(&mut self, uuid: Uuid, index: Index, map_size_growth: usize) {
        let map_size = index.map_size().unwrap_or(DEFAULT_MAP_SIZE) + map_size_growth;
        let closing_event = index.prepare_for_closing();
        let generation = self.next_generation();
        self.unavailable
            .insert(uuid, Some(ClosingIndex { uuid, closing_event, map_size, generation }));
    }

    /// Attempts to delete an index.
    ///
    /// `end_deletion` must be called just after.
    ///
    /// # Status table
    ///
    /// | Previous Status | New Status   | Return value                |
    /// |-----------------|--------------|-----------------------------|
    /// | Missing         | BeingDeleted | Ok(None)                    |
    /// | BeingDeleted    | BeingDeleted | Err(None)                   |
    /// | Closing         | Closing      | Err(Some(reopen))           |
    /// | Available       | BeingDeleted | Ok(Some(env_closing_event)) |
    pub fn start_deletion(
        &mut self,
        uuid: &Uuid,
    ) -> std::result::Result<Option<EnvClosingEvent>, Option<ClosingIndex>> {
        if let Some(index) = self.available.remove(uuid) {
            self.unavailable.insert(*uuid, None);
            return Ok(Some(index.prepare_for_closing()));
        }
        match self.unavailable.remove(uuid) {
            Some(Some(reopen)) => Err(Some(reopen)),
            Some(None) => Err(None),
            None => Ok(None),
        }
    }

    /// Marks that an index deletion finished.
    ///
    /// Must be used after calling `start_deletion`.
    ///
    /// # Status table
    ///
    /// | Previous Status | New Status |
    /// |-----------------|------------|
    /// | Missing         | Missing    |
    /// | BeingDeleted    | Missing    |
    /// | Closing         | panics     |
    /// | Available       | panics     |
    pub fn end_deletion(&mut self, uuid: &Uuid) {
        assert!(
            self.available.get(uuid).is_none(),
            "Attempt to finish deletion of an index that was not being deleted"
        );
        // Do not panic if the index was Missing or BeingDeleted
        assert!(
            !matches!(self.unavailable.remove(uuid), Some(Some(_))),
            "Attempt to finish deletion of an index that was being closed"
        );
    }
}
/// Create or open an index in the specified path.
/// The path *must* exist or an error will be thrown.
fn create_or_open_index(
    path: &Path,
    date: Option<(OffsetDateTime, OffsetDateTime)>,
    map_size: usize,
) -> Result<Index> {
    let mut options = EnvOpenOptions::new();
    options.map_size(clamp_to_page_size(map_size));
    options.max_readers(1024);

    if let Some((created, updated)) = date {
        Ok(Index::new_with_creation_dates(options, path, created, updated)?)
    } else {
        Ok(Index::new(options, path)?)
    }
}

/// Putting the tests of the LRU down there so we have access to the cache's private members
#[cfg(test)]
mod tests {

    use meilisearch_types::heed::Env;
    use meilisearch_types::Index;
    use uuid::Uuid;

    use super::super::IndexMapper;
    use crate::tests::IndexSchedulerHandle;
    use crate::utils::clamp_to_page_size;
    use crate::IndexScheduler;

    impl IndexMapper {
        fn test() -> (Self, Env, IndexSchedulerHandle) {
            let (index_scheduler, handle) = IndexScheduler::test(true, vec![]);
            (index_scheduler.index_mapper, index_scheduler.env, handle)
        }
    }

    fn check_first_unavailable(mapper: &IndexMapper, expected_uuid: Uuid, is_closing: bool) {
        let index_map = mapper.index_map.read().unwrap();
        let (uuid, state) = index_map.unavailable.first_key_value().unwrap();
        assert_eq!(uuid, &expected_uuid);
        assert_eq!(state.is_some(), is_closing);
    }

    #[test]
    fn evict_indexes() {
        let (mapper, env, _handle) = IndexMapper::test();
        let mut uuids = vec![];
        // LRU cap + 1
        for i in 0..(5 + 1) {
            let index_name = format!("index-{i}");
            let wtxn = env.write_txn().unwrap();
            mapper.create_index(wtxn, &index_name, None).unwrap();
            let txn = env.read_txn().unwrap();
            uuids.push(mapper.index_mapping.get(&txn, &index_name).unwrap().unwrap());
        }
        // index-0 was evicted
        check_first_unavailable(&mapper, uuids[0], true);

        // get back the evicted index
        let wtxn = env.write_txn().unwrap();
        mapper.create_index(wtxn, "index-0", None).unwrap();

        // Least recently used is now index-1
        check_first_unavailable(&mapper, uuids[1], true);
    }

    #[test]
    fn resize_index() {
        let (mapper, env, _handle) = IndexMapper::test();
        let index = mapper.create_index(env.write_txn().unwrap(), "index", None).unwrap();
        assert_index_size(index, mapper.index_base_map_size);

        mapper.resize_index(&env.read_txn().unwrap(), "index").unwrap();

        let index = mapper.create_index(env.write_txn().unwrap(), "index", None).unwrap();
        assert_index_size(index, mapper.index_base_map_size + mapper.index_growth_amount);

        mapper.resize_index(&env.read_txn().unwrap(), "index").unwrap();

        let index = mapper.create_index(env.write_txn().unwrap(), "index", None).unwrap();
        assert_index_size(index, mapper.index_base_map_size + mapper.index_growth_amount * 2);
    }

    fn assert_index_size(index: Index, expected: usize) {
        let expected = clamp_to_page_size(expected);
        let index_map_size = index.map_size().unwrap();
        assert_eq!(index_map_size, expected);
    }
}

@@ -1,453 +0,0 @@
use std::path::PathBuf;
use std::sync::{Arc, RwLock};
use std::time::Duration;
use std::{fs, thread};

use log::error;
use meilisearch_types::heed::types::{SerdeJson, Str};
use meilisearch_types::heed::{Database, Env, RoTxn, RwTxn};
use meilisearch_types::milli::update::IndexerConfig;
use meilisearch_types::milli::{FieldDistribution, Index};
use serde::{Deserialize, Serialize};
use time::OffsetDateTime;
use uuid::Uuid;

use self::index_map::IndexMap;
use self::IndexStatus::{Available, BeingDeleted, Closing, Missing};
use crate::uuid_codec::UuidCodec;
use crate::{Error, Result};

mod index_map;

const INDEX_MAPPING: &str = "index-mapping";
const INDEX_STATS: &str = "index-stats";

/// Structure managing meilisearch's indexes.
///
/// It is responsible for:
/// 1. Creating new indexes
/// 2. Opening indexes and storing references to these opened indexes
/// 3. Accessing indexes through their uuid
/// 4. Mapping a user-defined name to each index uuid.
///
/// # Implementation notes
///
/// An index exists as 3 bits of data:
/// 1. The index data on disk, that can exist in 3 states: Missing, Present, or BeingDeleted.
/// 2. The persistent database containing the association between the index' name and its UUID,
///    that can exist in 2 states: Missing or Present.
/// 3. The state of the index in the in-memory `IndexMap`, that can exist in multiple states:
///    - Missing
///    - Available
///    - Closing (because an index needs resizing or was evicted from the cache)
///    - BeingDeleted
///
/// All of this data should be kept consistent between index operations, which is achieved by the `IndexMapper`
/// with the use of the following primitives:
/// - A RwLock on the `IndexMap`.
/// - Transactions on the association database.
/// - ClosingEvent signals emitted when closing an environment.
#[derive(Clone)]
pub struct IndexMapper {
    /// Keep track of the opened indexes. Used mainly by the index resolver.
    index_map: Arc<RwLock<IndexMap>>,

    /// Map an index name with an index uuid currently available on disk.
    pub(crate) index_mapping: Database<Str, UuidCodec>,
    /// Map an index UUID with the cached stats associated to the index.
    ///
    /// Using a UUID forces to use the index_mapping table to recover the index behind a name, ensuring
    /// consistency wrt index swapping.
    pub(crate) index_stats: Database<UuidCodec, SerdeJson<IndexStats>>,

    /// Path to the folder where the LMDB environments of each index are.
    base_path: PathBuf,
    /// The map size an index is opened with on the first time.
    index_base_map_size: usize,
    /// The quantity by which the map size of an index is incremented upon reopening, in bytes.
    index_growth_amount: usize,
    pub indexer_config: Arc<IndexerConfig>,
}

/// Whether the index is available for use or is forbidden to be inserted back in the index map
#[allow(clippy::large_enum_variant)]
#[derive(Clone)]
pub enum IndexStatus {
    /// Not currently in the index map.
    Missing,
    /// Do not insert it back in the index map as it is currently being deleted.
    BeingDeleted,
    /// Temporarily do not insert the index in the index map as it is currently being resized/evicted from the map.
    Closing(index_map::ClosingIndex),
    /// You can use the index without worrying about anything.
    Available(Index),
}

/// The statistics that can be computed from an `Index` object.
#[derive(Serialize, Deserialize, Debug)]
pub struct IndexStats {
    /// Number of documents in the index.
    pub number_of_documents: u64,
    /// Size of the index' DB, in bytes.
    pub database_size: u64,
    /// Association of every field name with the number of times it occurs in the documents.
    pub field_distribution: FieldDistribution,
    /// Creation date of the index.
    pub created_at: OffsetDateTime,
    /// Date of the last update of the index.
    pub updated_at: OffsetDateTime,
}

impl IndexStats {
    /// Compute the stats of an index
    ///
    /// # Parameters
    ///
    /// - rtxn: a RO transaction for the index, obtained from `Index::read_txn()`.
    pub fn new(index: &Index, rtxn: &RoTxn) -> Result<Self> {
        let database_size = index.on_disk_size()?;
        Ok(IndexStats {
            number_of_documents: index.number_of_documents(rtxn)?,
            database_size,
            field_distribution: index.field_distribution(rtxn)?,
            created_at: index.created_at(rtxn)?,
            updated_at: index.updated_at(rtxn)?,
        })
    }
}
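IndexStats::new only reads; persisting the result goes through IndexMapper::store_stats_of, keyed by the index UUID. The scheduler hunks earlier in this diff show the real call site; here is a hedged sketch of the compute-then-store sequence after an index update, assuming the IndexMapper, Index, and scheduler Env from this module are in scope:

// Hedged sketch mirroring the scheduler's non-critical stats refresh: compute
// stats under a RO txn on the index, then persist them under a RW txn on the
// scheduler's environment.
fn refresh_stats(mapper: &IndexMapper, env: &Env, uid: &str, index: &Index) -> Result<()> {
    let index_rtxn = index.read_txn()?;               // RO txn on the index itself
    let stats = IndexStats::new(index, &index_rtxn)?; // read-only computation
    let mut wtxn = env.write_txn()?;                  // RW txn on the scheduler env
    mapper.store_stats_of(&mut wtxn, uid, &stats)?;   // cached under the index UUID
    wtxn.commit()?;
    Ok(())
}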
impl IndexMapper {
    pub fn new(
        env: &Env,
        base_path: PathBuf,
        index_base_map_size: usize,
        index_growth_amount: usize,
        index_count: usize,
        indexer_config: IndexerConfig,
    ) -> Result<Self> {
        Ok(Self {
            index_map: Arc::new(RwLock::new(IndexMap::new(index_count))),
            index_mapping: env.create_database(Some(INDEX_MAPPING))?,
            index_stats: env.create_database(Some(INDEX_STATS))?,
            base_path,
            index_base_map_size,
            index_growth_amount,
            indexer_config: Arc::new(indexer_config),
        })
    }

    /// Get or create the index.
    pub fn create_index(
        &self,
        mut wtxn: RwTxn,
        name: &str,
        date: Option<(OffsetDateTime, OffsetDateTime)>,
    ) -> Result<Index> {
        match self.index(&wtxn, name) {
            Ok(index) => {
                wtxn.commit()?;
                Ok(index)
            }
            Err(Error::IndexNotFound(_)) => {
                let uuid = Uuid::new_v4();
                self.index_mapping.put(&mut wtxn, name, &uuid)?;

                let index_path = self.base_path.join(uuid.to_string());
                fs::create_dir_all(&index_path)?;

                // Error if the UUIDv4 somehow already exists in the map, since it should be fresh.
                // This is very unlikely to happen in practice.
                // TODO: it would be better to lazily create the index. But we need an Index::open function for milli.
                let index = self.index_map.write().unwrap().create(
                    &uuid,
                    &index_path,
                    date,
                    self.index_base_map_size,
                )?;

                wtxn.commit()?;

                Ok(index)
            }
            error => error,
        }
    }

    /// Removes the index from the mapping table and the in-memory index map
    /// but keeps the associated tasks.
    pub fn delete_index(&self, mut wtxn: RwTxn, name: &str) -> Result<()> {
        let uuid = self
            .index_mapping
            .get(&wtxn, name)?
            .ok_or_else(|| Error::IndexNotFound(name.to_string()))?;

        // Not an error if the index had no stats in cache.
        self.index_stats.delete(&mut wtxn, &uuid)?;

        // Once we retrieved the UUID of the index we remove it from the mapping table.
        assert!(self.index_mapping.delete(&mut wtxn, name)?);

        wtxn.commit()?;

        let mut tries = 0;
        // Attempts to remove the index from the in-memory index map in a loop.
        //
        // If the index is currently being closed, we will wait for it to be closed and retry getting it in a subsequent
        // loop iteration.
        //
        // We make 100 attempts before giving up.
        // This could happen in the following situations:
        //
        // 1. There is a bug preventing the index from being correctly closed, or us from detecting this.
        // 2. A user of the index is keeping it open for more than 600 seconds. This could happen e.g. during a pathological search.
        //    This can not be caused by indexation because deleting an index happens in the scheduler itself, so cannot be concurrent with indexation.
        //
        // In these situations, reporting the error through a panic is in order.
        let closing_event = loop {
            let mut lock = self.index_map.write().unwrap();
            match lock.start_deletion(&uuid) {
                Ok(env_closing) => break env_closing,
                Err(Some(reopen)) => {
                    // drop the lock here so that we don't synchronously wait for the index to close.
                    drop(lock);
                    tries += 1;
                    if tries >= 100 {
                        panic!("Too many attempts to close index {name} prior to deletion.")
                    }
                    let reopen = if let Some(reopen) = reopen.wait_timeout(Duration::from_secs(6)) {
                        reopen
                    } else {
                        continue;
                    };
                    reopen.close(&mut self.index_map.write().unwrap());
                    continue;
                }
                Err(None) => return Ok(()),
            }
        };

        let index_map = self.index_map.clone();
        let index_path = self.base_path.join(uuid.to_string());
        let index_name = name.to_string();
        thread::Builder::new()
            .name(String::from("index_deleter"))
            .spawn(move || {
                // We first wait to be sure that the previously opened index is effectively closed.
                // This can take a lot of time, this is why we do that in a separate thread.
                if let Some(closing_event) = closing_event {
                    closing_event.wait();
                }

                // Then we remove the content from disk.
                if let Err(e) = fs::remove_dir_all(&index_path) {
                    error!(
                        "An error happened when deleting the index {} ({}): {}",
                        index_name, uuid, e
                    );
                }

                // Finally we remove the entry from the index map.
                index_map.write().unwrap().end_deletion(&uuid);
            })
            .unwrap();

        Ok(())
    }

    pub fn exists(&self, rtxn: &RoTxn, name: &str) -> Result<bool> {
        Ok(self.index_mapping.get(rtxn, name)?.is_some())
    }

    /// Resizes the maximum size of the specified index, increasing it by the configured growth amount.
    ///
    /// This operation involves closing the underlying environment and so can take a long time to complete.
    ///
    /// # Panics
    ///
    /// - If the Index corresponding to the passed name is concurrently being deleted/resized or cannot be found in the
    ///   in memory hash map.
    pub fn resize_index(&self, rtxn: &RoTxn, name: &str) -> Result<()> {
        let uuid = self
            .index_mapping
            .get(rtxn, name)?
            .ok_or_else(|| Error::IndexNotFound(name.to_string()))?;

        // We remove the index from the in-memory index map.
        self.index_map.write().unwrap().close_for_resize(&uuid, self.index_growth_amount);

        Ok(())
    }

    /// Return an index, may open it if it wasn't already opened.
    pub fn index(&self, rtxn: &RoTxn, name: &str) -> Result<Index> {
        let uuid = self
            .index_mapping
            .get(rtxn, name)?
            .ok_or_else(|| Error::IndexNotFound(name.to_string()))?;

        let mut tries = 0;
        // Attempts to open the index in a loop.
        //
        // If the index is currently being closed, we will wait for it to be closed and retry getting it in a subsequent
        // loop iteration.
        //
        // We make 100 attempts before giving up.
        // This could happen in the following situations:
        //
        // 1. There is a bug preventing the index from being correctly closed, or us from detecting it was.
        // 2. A user of the index is keeping it open for more than 600 seconds. This could happen e.g. during a long indexation,
        //    a pathological search, and so on.
        //
        // In these situations, reporting the error through a panic is in order.
        let index = loop {
            tries += 1;
            if tries > 100 {
                panic!("Too many spurious wake ups while trying to open the index {name}");
            }

            // we get the index here to drop the lock before entering the match
            let index = self.index_map.read().unwrap().get(&uuid);

            match index {
                Available(index) => break index,
                Closing(reopen) => {
                    // Avoiding deadlocks: no lock taken while doing this operation.
                    let reopen = if let Some(reopen) = reopen.wait_timeout(Duration::from_secs(6)) {
                        reopen
                    } else {
                        continue;
                    };
                    let index_path = self.base_path.join(uuid.to_string());
                    // take the lock to reopen the environment.
                    reopen.reopen(&mut self.index_map.write().unwrap(), &index_path)?;
                    continue;
                }
                BeingDeleted => return Err(Error::IndexNotFound(name.to_string())),
                // since we're lazy, it's possible that the index has not been opened yet.
                Missing => {
                    let mut index_map = self.index_map.write().unwrap();
                    // between the read lock and the write lock it's not impossible
                    // that someone already opened the index (eg if two searches happen
                    // at the same time), thus before opening it we check a second time
                    // if it's not already there.
                    match index_map.get(&uuid) {
                        Missing => {
                            let index_path = self.base_path.join(uuid.to_string());

                            break index_map.create(
                                &uuid,
                                &index_path,
                                None,
                                self.index_base_map_size,
                            )?;
                        }
                        Available(index) => break index,
                        Closing(_) => {
                            // the reopening will be handled in the next loop operation
                            continue;
                        }
                        BeingDeleted => return Err(Error::IndexNotFound(name.to_string())),
                    }
                }
            }
        };

        Ok(index)
    }

    /// Attempts `f` for each index that exists in the index mapper.
    ///
    /// It is preferable to use this function rather than a loop that opens all indexes, as a way to avoid having all indexes opened,
    /// which is unsupported in general.
    ///
    /// Since `f` is allowed to return a result, and `Index` is cloneable, it is still possible to wrongly build e.g. a vector of
    /// all the indexes, but this function makes it harder and so less likely to do accidentally.
    pub fn try_for_each_index<U, V>(
        &self,
        rtxn: &RoTxn,
        mut f: impl FnMut(&str, &Index) -> Result<U>,
    ) -> Result<V>
    where
        V: FromIterator<U>,
    {
        self.index_mapping
            .iter(rtxn)?
            .map(|res| {
                res.map_err(Error::from)
                    .and_then(|(name, _)| self.index(rtxn, name).and_then(|index| f(name, &index)))
            })
            .collect()
    }
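Because the closure returns a Result and the output is collected via FromIterator, callers can aggregate per-index values without accumulating Index handles. A hedged usage sketch, relying only on on_disk_size, the same accessor IndexStats::new uses above:

// Hedged usage sketch: total on-disk size across all indexes, borrowing each
// `Index` only for the duration of the closure.
fn total_db_size(mapper: &IndexMapper, rtxn: &RoTxn) -> Result<u64> {
    let sizes: Vec<u64> =
        mapper.try_for_each_index(rtxn, |_name, index| Ok(index.on_disk_size()?))?;
    Ok(sizes.iter().sum())
}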
    /// Return the name of all indexes without opening them.
    pub fn index_names(&self, rtxn: &RoTxn) -> Result<Vec<String>> {
        self.index_mapping
            .iter(rtxn)?
            .map(|res| res.map_err(Error::from).map(|(name, _)| name.to_string()))
            .collect()
    }

    /// Swap two index names.
    pub fn swap(&self, wtxn: &mut RwTxn, lhs: &str, rhs: &str) -> Result<()> {
        let lhs_uuid = self
            .index_mapping
            .get(wtxn, lhs)?
            .ok_or_else(|| Error::IndexNotFound(lhs.to_string()))?;
        let rhs_uuid = self
            .index_mapping
            .get(wtxn, rhs)?
            .ok_or_else(|| Error::IndexNotFound(rhs.to_string()))?;

        self.index_mapping.put(wtxn, lhs, &rhs_uuid)?;
        self.index_mapping.put(wtxn, rhs, &lhs_uuid)?;

        Ok(())
    }

    /// The stats of an index.
    ///
    /// If available in the cache, they are directly returned.
    /// Otherwise, the `Index` is opened to compute the stats on the fly (the result is not cached).
    /// The stats for an index are cached after each `Index` update.
    pub fn stats_of(&self, rtxn: &RoTxn, index_uid: &str) -> Result<IndexStats> {
        let uuid = self
            .index_mapping
            .get(rtxn, index_uid)?
            .ok_or_else(|| Error::IndexNotFound(index_uid.to_string()))?;

        match self.index_stats.get(rtxn, &uuid)? {
            Some(stats) => Ok(stats),
            None => {
                let index = self.index(rtxn, index_uid)?;
                let index_rtxn = index.read_txn()?;
                IndexStats::new(&index, &index_rtxn)
            }
        }
    }

    /// Stores the new stats for an index.
    ///
    /// Expected usage is to compute the stats of the index using `IndexStats::new`, then pass them to this function.
    pub fn store_stats_of(
        &self,
        wtxn: &mut RwTxn,
        index_uid: &str,
        stats: &IndexStats,
    ) -> Result<()> {
        let uuid = self
            .index_mapping
            .get(wtxn, index_uid)?
            .ok_or_else(|| Error::IndexNotFound(index_uid.to_string()))?;

        self.index_stats.put(wtxn, &uuid, stats)?;
        Ok(())
    }

    pub fn index_exists(&self, rtxn: &RoTxn, name: &str) -> Result<bool> {
        Ok(self.index_mapping.get(rtxn, name)?.is_some())
    }

    pub fn indexer_config(&self) -> &IndexerConfig {
        &self.indexer_config
    }
}

@@ -254,16 +254,6 @@ pub fn snapshot_canceled_by(
    snap
}
pub fn snapshot_index_mapper(rtxn: &RoTxn, mapper: &IndexMapper) -> String {
    let mut s = String::new();
    let names = mapper.index_names(rtxn).unwrap();

    for name in names {
        let stats = mapper.stats_of(rtxn, &name).unwrap();
        s.push_str(&format!(
            "{name}: {{ number_of_documents: {}, field_distribution: {:?} }}\n",
            stats.number_of_documents, stats.field_distribution
        ));
    }

    s
    let names = mapper.indexes(rtxn).unwrap().into_iter().map(|(n, _)| n).collect::<Vec<_>>();
    format!("{names:?}")
}
@@ -24,7 +24,6 @@ pub mod error;
mod index_mapper;
#[cfg(test)]
mod insta_snapshot;
mod lru;
mod utils;
mod uuid_codec;

@@ -32,7 +31,7 @@ pub type Result<T> = std::result::Result<T, Error>;
pub type TaskId = u32;

use std::ops::{Bound, RangeBounds};
use std::path::{Path, PathBuf};
use std::path::PathBuf;
use std::sync::atomic::AtomicBool;
use std::sync::atomic::Ordering::Relaxed;
use std::sync::{Arc, RwLock};

@@ -44,9 +43,10 @@ use file_store::FileStore;
use meilisearch_types::error::ResponseError;
use meilisearch_types::heed::types::{OwnedType, SerdeBincode, SerdeJson, Str};
use meilisearch_types::heed::{self, Database, Env, RoTxn};
use meilisearch_types::milli;
use meilisearch_types::milli::documents::DocumentsBatchBuilder;
use meilisearch_types::milli::update::IndexerConfig;
use meilisearch_types::milli::{self, CboRoaringBitmapCodec, Index, RoaringBitmapCodec, BEU32};
use meilisearch_types::milli::{CboRoaringBitmapCodec, Index, RoaringBitmapCodec, BEU32};
use meilisearch_types::tasks::{Kind, KindWithContent, Status, Task};
use roaring::RoaringBitmap;
use synchronoise::SignalEvent;

@@ -229,12 +229,8 @@ pub struct IndexSchedulerOptions {
    pub dumps_path: PathBuf,
    /// The maximum size, in bytes, of the task index.
    pub task_db_size: usize,
    /// The size, in bytes, with which each meilisearch index is opened the first time.
    pub index_base_map_size: usize,
    /// The size, in bytes, by which the map size of an index is increased when it is resized due to being full.
    pub index_growth_amount: usize,
    /// The number of indexes that can be concurrently opened in memory.
    pub index_count: usize,
    /// The maximum size, in bytes, of each meilisearch index.
    pub index_size: usize,
    /// Configuration used during indexing for each meilisearch index.
    pub indexer_config: IndexerConfig,
    /// Set to `true` iff the index scheduler is allowed to automatically

@@ -364,25 +360,9 @@ impl IndexScheduler {
        std::fs::create_dir_all(&options.indexes_path)?;
        std::fs::create_dir_all(&options.dumps_path)?;

        let task_db_size = clamp_to_page_size(options.task_db_size);
        let budget = if options.indexer_config.skip_index_budget {
            IndexBudget {
                map_size: options.index_base_map_size,
                index_count: options.index_count,
                task_db_size,
            }
        } else {
            Self::index_budget(
                &options.tasks_path,
                options.index_base_map_size,
                task_db_size,
                options.index_count,
            )
        };

        let env = heed::EnvOpenOptions::new()
            .max_dbs(10)
            .map_size(budget.task_db_size)
            .map_size(clamp_to_page_size(options.task_db_size))
            .open(options.tasks_path)?;
        let file_store = FileStore::new(&options.update_file_path)?;
@@ -402,9 +382,7 @@ impl IndexScheduler {
|
||||
index_mapper: IndexMapper::new(
|
||||
&env,
|
||||
options.indexes_path,
|
||||
budget.map_size,
|
||||
options.index_growth_amount,
|
||||
budget.index_count,
|
||||
options.index_size,
|
||||
options.indexer_config,
|
||||
)?,
|
||||
env,
|
||||
@@ -428,75 +406,6 @@ impl IndexScheduler {
|
||||
Ok(this)
|
||||
}

fn index_budget(
tasks_path: &Path,
base_map_size: usize,
mut task_db_size: usize,
max_index_count: usize,
) -> IndexBudget {
#[cfg(windows)]
const DEFAULT_BUDGET: usize = 6 * 1024 * 1024 * 1024 * 1024; // 6 TiB, 1 index
#[cfg(not(windows))]
const DEFAULT_BUDGET: usize = 80 * 1024 * 1024 * 1024 * 1024; // 80 TiB, 18 indexes

let budget = if Self::is_good_heed(tasks_path, DEFAULT_BUDGET) {
DEFAULT_BUDGET
} else {
log::debug!("determining budget with dichotomic search");
utils::dichotomic_search(DEFAULT_BUDGET / 2, |map_size| {
Self::is_good_heed(tasks_path, map_size)
})
};

log::debug!("memmap budget: {budget}B");
let mut budget = budget / 2;
if task_db_size > (budget / 2) {
task_db_size = clamp_to_page_size(budget * 2 / 5);
log::debug!(
"Decreasing max size of task DB to {task_db_size}B due to constrained memory space"
);
}
budget -= task_db_size;

// won't be mutated again
let budget = budget;
let task_db_size = task_db_size;

log::debug!("index budget: {budget}B");
let mut index_count = budget / base_map_size;
if index_count < 2 {
// take a bit less than half of the budget to make sure we can always afford to open an index
let map_size = (budget * 2) / 5;
// single index of max budget
log::debug!("1 index of {map_size}B can be opened simultaneously.");
return IndexBudget { map_size, index_count: 1, task_db_size };
}
// give us some space for an additional index when the cache is already full
// decrement is OK because index_count >= 2.
index_count -= 1;
if index_count > max_index_count {
index_count = max_index_count;
}
log::debug!("Up to {index_count} indexes of {base_map_size}B opened simultaneously.");
IndexBudget { map_size: base_map_size, index_count, task_db_size }
}
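A worked example of the arithmetic above, with assumed numbers (the 2 TiB per-index map size is an illustration-only assumption, chosen to be consistent with the "80 TiB, 18 indexes" comment on `DEFAULT_BUDGET`):

let probed: usize = 80 * 1024 * 1024 * 1024 * 1024; // the 80 TiB probe succeeded
let mut budget = probed / 2; // keep half: 40 TiB
let task_db_size: usize = 10 * 1024 * 1024 * 1024; // 10 GiB <= budget / 2, so not reduced
budget -= task_db_size;
let base_map_size: usize = 2 * 1024 * 1024 * 1024 * 1024; // assumed 2 TiB per index
let index_count = budget / base_map_size - 1; // 19 fit; one slot is kept free
assert_eq!(index_count, 18);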

fn is_good_heed(tasks_path: &Path, map_size: usize) -> bool {
if let Ok(env) =
heed::EnvOpenOptions::new().map_size(clamp_to_page_size(map_size)).open(tasks_path)
{
env.prepare_for_closing().wait();
true
} else {
// We're treating all errors equally here, not only allocation errors.
// This means there's a possibility for the budget to be lowered due to errors other than allocation errors.
// For persistent errors, this is OK as long as the task db is then reopened normally without ignoring the error this time.
// For transient errors, this could lead to an instance with too low a budget.
// However, transient errors are: 1) less likely than persistent errors 2) likely to cause other issues down the line anyway.
false
}
}
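`utils::dichotomic_search` itself is not part of this diff; a minimal sketch of what such a search could look like, assuming the predicate is monotonic (true up to some threshold, then false), which holds for map-size probing:

fn dichotomic_search(start: usize, is_good: impl Fn(usize) -> bool) -> usize {
    // Invariant: `lo` is assumed good (0 trivially so), `hi` is known bad.
    // The caller passes start = DEFAULT_BUDGET / 2, so `hi` begins at the size that just failed.
    let (mut lo, mut hi) = (0usize, start * 2);
    while hi - lo > 1 {
        let mid = lo + (hi - lo) / 2;
        if is_good(mid) { lo = mid } else { hi = mid }
    }
    lo
}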

pub fn read_txn(&self) -> Result<RoTxn> {
self.env.read_txn().map_err(|e| e.into())
}
@@ -513,12 +422,12 @@ impl IndexScheduler {
#[cfg(test)]
run.breakpoint(Breakpoint::Init);

run.wake_up.wait();

loop {
run.wake_up.wait();

match run.tick() {
Ok(TickOutcome::TickAgain(_)) => (),
Ok(TickOutcome::WaitForSignal) => run.wake_up.wait(),
Ok(0) => (),
Ok(_) => run.wake_up.signal(),
Err(e) => {
log::error!("{}", e);
// Wait one second when an irrecoverable error occurs.
@@ -531,6 +440,7 @@ impl IndexScheduler {
) {
std::thread::sleep(Duration::from_secs(1));
}
run.wake_up.signal();
}
}
}
@@ -550,42 +460,15 @@ impl IndexScheduler {
///
/// * If the index wasn't opened before, the index will be opened.
/// * If the index doesn't exist on disk, the `IndexNotFoundError` is thrown.
///
/// ### Note
///
/// As an `Index` requires a large swath of the virtual memory address space, correct usage of an `Index` avoids
/// keeping its handle around for too long.
///
/// Some configurations also can't reasonably open multiple indexes at once.
/// If you need to fetch information from or perform an action on all indexes,
/// see the `try_for_each_index` function.
pub fn index(&self, name: &str) -> Result<Index> {
let rtxn = self.env.read_txn()?;
self.index_mapper.index(&rtxn, name)
}

/// Return the name of all indexes without opening them.
pub fn index_names(&self) -> Result<Vec<String>> {
/// Return and open all the indexes.
pub fn indexes(&self) -> Result<Vec<(String, Index)>> {
let rtxn = self.env.read_txn()?;
self.index_mapper.index_names(&rtxn)
}

/// Attempts `f` for each index known to the index scheduler.
///
/// It is preferable to use this function rather than a loop that opens all indexes, as a way to avoid having all indexes opened,
/// which is unsupported in general.
///
/// Since `f` is allowed to return a result, and `Index` is cloneable, it is still possible to wrongly build e.g. a vector of
/// all the indexes, but this function makes it harder, and so less likely to happen accidentally.
///
/// If many indexes exist, this operation can take time to complete (on the order of seconds for 1000 indexes) as it needs to open
/// all the indexes.
pub fn try_for_each_index<U, V>(&self, f: impl FnMut(&str, &Index) -> Result<U>) -> Result<V>
where
V: FromIterator<U>,
{
let rtxn = self.env.read_txn()?;
self.index_mapper.try_for_each_index(&rtxn, f)
self.index_mapper.indexes(&rtxn)
}
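A minimal usage sketch of the removed `try_for_each_index` helper, assuming the signature shown above (`scheduler` is a hypothetical `IndexScheduler`); it visits the indexes one by one, so not all of them need to be open at once:

let doc_counts: Vec<(String, u64)> = scheduler.try_for_each_index(|name, index| {
    let rtxn = index.read_txn()?;
    Ok((name.to_string(), index.number_of_documents(&rtxn)?))
})?;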

/// Return the task ids matched by the given query from the index scheduler's point of view.
@@ -747,13 +630,13 @@ impl IndexScheduler {
&self,
rtxn: &RoTxn,
query: &Query,
filters: &meilisearch_auth::AuthFilter,
authorized_indexes: &Option<Vec<String>>,
) -> Result<RoaringBitmap> {
let mut tasks = self.get_task_ids(rtxn, query)?;

// If the query contains a list of index uids or there is a finite list of authorized indexes,
// then we must exclude all the kinds that aren't associated to one and only one index.
if query.index_uids.is_some() || !filters.all_indexes_authorized() {
if query.index_uids.is_some() || authorized_indexes.is_some() {
for kind in enum_iterator::all::<Kind>().filter(|kind| !kind.related_to_one_index()) {
tasks -= self.get_kind(rtxn, kind)?;
}
@@ -761,11 +644,11 @@ impl IndexScheduler {

// Any task that is internally associated with a non-authorized index
// must be discarded.
if !filters.all_indexes_authorized() {
if let Some(authorized_indexes) = authorized_indexes {
let all_indexes_iter = self.index_tasks.iter(rtxn)?;
for result in all_indexes_iter {
let (index, index_tasks) = result?;
if !filters.is_index_authorized(index) {
if !authorized_indexes.contains(&index.to_owned()) {
tasks -= index_tasks;
}
}
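The `tasks -= index_tasks` lines above are plain set subtraction on roaring bitmaps. A tiny self-contained sketch with the `roaring` crate (assumed here to implement `-=` for its bitmaps, as the code above relies on):

use roaring::RoaringBitmap;

let mut tasks: RoaringBitmap = (0..6).collect(); // candidate task ids
let unauthorized: RoaringBitmap = (2..4).collect(); // tasks of a non-authorized index
tasks -= unauthorized;
assert!(!tasks.contains(2) && tasks.contains(4));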
@@ -785,11 +668,12 @@ impl IndexScheduler {
pub fn get_tasks_from_authorized_indexes(
&self,
query: Query,
filters: &meilisearch_auth::AuthFilter,
authorized_indexes: Option<Vec<String>>,
) -> Result<Vec<Task>> {
let rtxn = self.env.read_txn()?;

let tasks = self.get_task_ids_from_authorized_indexes(&rtxn, &query, filters)?;
let tasks =
self.get_task_ids_from_authorized_indexes(&rtxn, &query, &authorized_indexes)?;

let tasks = self.get_existing_tasks(
&rtxn,
@@ -880,8 +764,8 @@ impl IndexScheduler {
Ok(task)
}

/// Register a new task coming from a dump in the scheduler.
/// By taking a mutable ref we're pretty sure no one will ever import a dump while actix is running.
/// Register a new task comming from a dump in the scheduler.
/// By takinig a mutable ref we're pretty sure no one will ever import a dump while actix is running.
pub fn register_dumped_task(
&mut self,
task: TaskDump,
@@ -987,18 +871,6 @@ impl IndexScheduler {
(bitmap.insert(task.uid));
})?;

utils::insert_task_datetime(&mut wtxn, self.enqueued_at, task.enqueued_at, task.uid)?;

// we can't override the started_at & finished_at, so we must only set them if the task is finished and won't change
if matches!(task.status, Status::Succeeded | Status::Failed | Status::Canceled) {
if let Some(started_at) = task.started_at {
utils::insert_task_datetime(&mut wtxn, self.started_at, started_at, task.uid)?;
}
if let Some(finished_at) = task.finished_at {
utils::insert_task_datetime(&mut wtxn, self.finished_at, finished_at, task.uid)?;
}
}

wtxn.commit()?;
self.wake_up.signal();

@@ -1054,7 +926,7 @@ impl IndexScheduler {
/// 5. Reset the in-memory list of processed tasks.
///
/// Returns the number of processed tasks.
fn tick(&self) -> Result<TickOutcome> {
fn tick(&self) -> Result<usize> {
#[cfg(test)]
{
*self.run_loop_iteration.write().unwrap() += 1;
@@ -1065,9 +937,8 @@ impl IndexScheduler {
let batch =
match self.create_next_batch(&rtxn).map_err(|e| Error::CreateBatch(Box::new(e)))? {
Some(batch) => batch,
None => return Ok(TickOutcome::WaitForSignal),
None => return Ok(0),
};
let index_uid = batch.index_uid().map(ToOwned::to_owned);
drop(rtxn);

// 1. store the starting date with the bitmap of processing tasks.
@@ -1138,23 +1009,7 @@ impl IndexScheduler {
// the `started_at` date times and `processings` of the current processing tasks.
// This date time is used by the task cancelation to store the right `started_at`
// date in the task on disk.
return Ok(TickOutcome::TickAgain(0));
}
// If an index said it was full, we need to:
// 1. identify which index is full
// 2. close the associated environment
// 3. resize it
// 4. re-schedule tasks
Err(Error::Milli(milli::Error::UserError(
milli::UserError::MaxDatabaseSizeReached,
))) if index_uid.is_some() => {
// fixme: add index_uid to match to avoid the unwrap
let index_uid = index_uid.unwrap();
// fixme: handle error more gracefully? not sure when this could happen
self.index_mapper.resize_index(&wtxn, &index_uid)?;
wtxn.abort().map_err(Error::HeedTransaction)?;

return Ok(TickOutcome::TickAgain(0));
return Ok(0);
}
// In case of a failure we must get back and patch all the tasks with the error.
Err(err) => {
@@ -1194,15 +1049,7 @@ impl IndexScheduler {
#[cfg(test)]
self.breakpoint(Breakpoint::AfterProcessing);

Ok(TickOutcome::TickAgain(processed_tasks))
}

pub fn index_stats(&self, index_uid: &str) -> Result<IndexStats> {
let is_indexing = self.is_index_processing(index_uid)?;
let rtxn = self.read_txn()?;
let index_stats = self.index_mapper.stats_of(&rtxn, index_uid)?;

Ok(IndexStats { is_indexing, inner_stats: index_stats })
Ok(processed_tasks)
}

pub(crate) fn delete_persisted_task_data(&self, task: &Task) -> Result<()> {
@@ -1237,37 +1084,6 @@ impl IndexScheduler {
}
}

/// The outcome of calling the [`IndexScheduler::tick`] function.
pub enum TickOutcome {
/// The scheduler should immediately attempt another `tick`.
///
/// The `usize` field contains the number of processed tasks.
TickAgain(usize),
/// The scheduler should wait for an external signal before attempting another `tick`.
WaitForSignal,
}

/// How many indexes we can afford to have open simultaneously.
struct IndexBudget {
/// Map size of an index.
map_size: usize,
/// Maximum number of simultaneously opened indexes.
index_count: usize,
/// For very constrained systems we might need to reduce the base task_db_size so we can accept at least one index.
task_db_size: usize,
}

/// The statistics that can be computed from an `Index` object and the scheduler.
///
/// Compared with `index_mapper::IndexStats`, it adds the scheduling status.
#[derive(Debug)]
pub struct IndexStats {
/// Whether this index is currently performing indexation, according to the scheduler.
pub is_indexing: bool,
/// Internal stats computed from the index.
pub inner_stats: index_mapper::IndexStats,
}

#[cfg(test)]
mod tests {
use std::io::{BufWriter, Seek, Write};
@@ -1277,9 +1093,7 @@ mod tests {
use crossbeam::channel::RecvTimeoutError;
use file_store::File;
use meili_snap::snapshot;
use meilisearch_auth::AuthFilter;
use meilisearch_types::document_formats::DocumentFormatError;
use meilisearch_types::index_uid_pattern::IndexUidPattern;
use meilisearch_types::milli::obkv_to_json;
use meilisearch_types::milli::update::IndexDocumentsMethod::{
ReplaceDocuments, UpdateDocuments,
@@ -1313,8 +1127,6 @@ mod tests {
let tempdir = TempDir::new().unwrap();
let (sender, receiver) = crossbeam::channel::bounded(0);

let indexer_config = IndexerConfig { skip_index_budget: true, ..Default::default() };

let options = IndexSchedulerOptions {
version_file_path: tempdir.path().join(VERSION_FILE_NAME),
auth_path: tempdir.path().join("auth"),
@@ -1324,10 +1136,8 @@ mod tests {
snapshots_path: tempdir.path().join("snapshots"),
dumps_path: tempdir.path().join("dumps"),
task_db_size: 1000 * 1000, // 1 MB, we don't use MiB on purpose.
index_base_map_size: 1000 * 1000, // 1 MB, we don't use MiB on purpose.
index_growth_amount: 1000 * 1000, // 1 MB
index_count: 5,
indexer_config,
index_size: 1000 * 1000, // 1 MB, we don't use MiB on purpose.
indexer_config: IndexerConfig::default(),
autobatching_enabled,
};

@@ -2435,45 +2245,38 @@ mod tests {

let rtxn = index_scheduler.env.read_txn().unwrap();
let query = Query { limit: Some(0), ..Default::default() };
let tasks = index_scheduler
.get_task_ids_from_authorized_indexes(&rtxn, &query, &AuthFilter::default())
.unwrap();
let tasks =
index_scheduler.get_task_ids_from_authorized_indexes(&rtxn, &query, &None).unwrap();
snapshot!(snapshot_bitmap(&tasks), @"[]");

let query = Query { limit: Some(1), ..Default::default() };
let tasks = index_scheduler
.get_task_ids_from_authorized_indexes(&rtxn, &query, &AuthFilter::default())
.unwrap();
let tasks =
index_scheduler.get_task_ids_from_authorized_indexes(&rtxn, &query, &None).unwrap();
snapshot!(snapshot_bitmap(&tasks), @"[2,]");

let query = Query { limit: Some(2), ..Default::default() };
let tasks = index_scheduler
.get_task_ids_from_authorized_indexes(&rtxn, &query, &AuthFilter::default())
.unwrap();
let tasks =
index_scheduler.get_task_ids_from_authorized_indexes(&rtxn, &query, &None).unwrap();
snapshot!(snapshot_bitmap(&tasks), @"[1,2,]");

let query = Query { from: Some(1), ..Default::default() };
let tasks = index_scheduler
.get_task_ids_from_authorized_indexes(&rtxn, &query, &AuthFilter::default())
.unwrap();
let tasks =
index_scheduler.get_task_ids_from_authorized_indexes(&rtxn, &query, &None).unwrap();
snapshot!(snapshot_bitmap(&tasks), @"[0,1,]");

let query = Query { from: Some(2), ..Default::default() };
let tasks = index_scheduler
.get_task_ids_from_authorized_indexes(&rtxn, &query, &AuthFilter::default())
.unwrap();
let tasks =
index_scheduler.get_task_ids_from_authorized_indexes(&rtxn, &query, &None).unwrap();
snapshot!(snapshot_bitmap(&tasks), @"[0,1,2,]");

let query = Query { from: Some(1), limit: Some(1), ..Default::default() };
let tasks = index_scheduler
.get_task_ids_from_authorized_indexes(&rtxn, &query, &AuthFilter::default())
.unwrap();
let tasks =
index_scheduler.get_task_ids_from_authorized_indexes(&rtxn, &query, &None).unwrap();
snapshot!(snapshot_bitmap(&tasks), @"[1,]");

let query = Query { from: Some(1), limit: Some(2), ..Default::default() };
let tasks = index_scheduler
.get_task_ids_from_authorized_indexes(&rtxn, &query, &AuthFilter::default())
.unwrap();
let tasks =
index_scheduler.get_task_ids_from_authorized_indexes(&rtxn, &query, &None).unwrap();
snapshot!(snapshot_bitmap(&tasks), @"[0,1,]");
}

@@ -2498,24 +2301,21 @@ mod tests {
let rtxn = index_scheduler.env.read_txn().unwrap();

let query = Query { statuses: Some(vec![Status::Processing]), ..Default::default() };
let tasks = index_scheduler
.get_task_ids_from_authorized_indexes(&rtxn, &query, &AuthFilter::default())
.unwrap();
let tasks =
index_scheduler.get_task_ids_from_authorized_indexes(&rtxn, &query, &None).unwrap();
snapshot!(snapshot_bitmap(&tasks), @"[0,]"); // only the processing tasks in the first tick

let query = Query { statuses: Some(vec![Status::Enqueued]), ..Default::default() };
let tasks = index_scheduler
.get_task_ids_from_authorized_indexes(&rtxn, &query, &AuthFilter::default())
.unwrap();
let tasks =
index_scheduler.get_task_ids_from_authorized_indexes(&rtxn, &query, &None).unwrap();
snapshot!(snapshot_bitmap(&tasks), @"[1,2,]"); // only the enqueued tasks in the first tick

let query = Query {
statuses: Some(vec![Status::Enqueued, Status::Processing]),
..Default::default()
};
let tasks = index_scheduler
.get_task_ids_from_authorized_indexes(&rtxn, &query, &AuthFilter::default())
.unwrap();
let tasks =
index_scheduler.get_task_ids_from_authorized_indexes(&rtxn, &query, &None).unwrap();
snapshot!(snapshot_bitmap(&tasks), @"[0,1,2,]"); // both enqueued and processing tasks in the first tick

let query = Query {
@@ -2523,9 +2323,8 @@ mod tests {
after_started_at: Some(start_time),
..Default::default()
};
let tasks = index_scheduler
.get_task_ids_from_authorized_indexes(&rtxn, &query, &AuthFilter::default())
.unwrap();
let tasks =
index_scheduler.get_task_ids_from_authorized_indexes(&rtxn, &query, &None).unwrap();
// both enqueued and processing tasks in the first tick, but limited to those with a started_at
// that comes after the start of the test, which should exclude the enqueued tasks
snapshot!(snapshot_bitmap(&tasks), @"[0,]");
@@ -2535,9 +2334,8 @@ mod tests {
before_started_at: Some(start_time),
..Default::default()
};
let tasks = index_scheduler
.get_task_ids_from_authorized_indexes(&rtxn, &query, &AuthFilter::default())
.unwrap();
let tasks =
index_scheduler.get_task_ids_from_authorized_indexes(&rtxn, &query, &None).unwrap();
// both enqueued and processing tasks in the first tick, but limited to those with a started_at
// that comes before the start of the test, which should exclude all of them
snapshot!(snapshot_bitmap(&tasks), @"[]");
@@ -2548,9 +2346,8 @@ mod tests {
before_started_at: Some(start_time + Duration::minutes(1)),
..Default::default()
};
let tasks = index_scheduler
.get_task_ids_from_authorized_indexes(&rtxn, &query, &AuthFilter::default())
.unwrap();
let tasks =
index_scheduler.get_task_ids_from_authorized_indexes(&rtxn, &query, &None).unwrap();
// both enqueued and processing tasks in the first tick, but limited to those with a started_at
// that comes after the start of the test and before one minute after the start of the test,
// which should exclude the enqueued tasks and include the only processing task
@@ -2575,9 +2372,8 @@ mod tests {
before_started_at: Some(start_time + Duration::minutes(1)),
..Default::default()
};
let tasks = index_scheduler
.get_task_ids_from_authorized_indexes(&rtxn, &query, &AuthFilter::default())
.unwrap();
let tasks =
index_scheduler.get_task_ids_from_authorized_indexes(&rtxn, &query, &None).unwrap();
// both succeeded and processing tasks in the first tick, but limited to those with a started_at
// that comes after the start of the test and before one minute after the start of the test,
// which should include all tasks
@@ -2588,9 +2384,8 @@ mod tests {
before_started_at: Some(start_time),
..Default::default()
};
let tasks = index_scheduler
.get_task_ids_from_authorized_indexes(&rtxn, &query, &AuthFilter::default())
.unwrap();
let tasks =
index_scheduler.get_task_ids_from_authorized_indexes(&rtxn, &query, &None).unwrap();
// both succeeded and processing tasks in the first tick, but limited to those with a started_at
// that comes before the start of the test, which should exclude all tasks
snapshot!(snapshot_bitmap(&tasks), @"[]");
@@ -2601,9 +2396,8 @@ mod tests {
before_started_at: Some(second_start_time + Duration::minutes(1)),
..Default::default()
};
let tasks = index_scheduler
.get_task_ids_from_authorized_indexes(&rtxn, &query, &AuthFilter::default())
.unwrap();
let tasks =
index_scheduler.get_task_ids_from_authorized_indexes(&rtxn, &query, &None).unwrap();
// both succeeded and processing tasks in the first tick, but limited to those with a started_at
// that comes after the start of the second part of the test and before one minute after the
// second start of the test, which should exclude all tasks
@@ -2621,9 +2415,8 @@ mod tests {

let rtxn = index_scheduler.env.read_txn().unwrap();

let tasks = index_scheduler
.get_task_ids_from_authorized_indexes(&rtxn, &query, &AuthFilter::default())
.unwrap();
let tasks =
index_scheduler.get_task_ids_from_authorized_indexes(&rtxn, &query, &None).unwrap();
// we run the same query to verify that, and indeed find that the last task is matched
snapshot!(snapshot_bitmap(&tasks), @"[2,]");

@@ -2633,9 +2426,8 @@ mod tests {
before_started_at: Some(second_start_time + Duration::minutes(1)),
..Default::default()
};
let tasks = index_scheduler
.get_task_ids_from_authorized_indexes(&rtxn, &query, &AuthFilter::default())
.unwrap();
let tasks =
index_scheduler.get_task_ids_from_authorized_indexes(&rtxn, &query, &None).unwrap();
// enqueued, succeeded, or processing tasks started after the second part of the test, should
// again only return the last task
snapshot!(snapshot_bitmap(&tasks), @"[2,]");
@@ -2645,9 +2437,8 @@ mod tests {

// now the last task should have failed
snapshot!(snapshot_index_scheduler(&index_scheduler), name: "end");
let tasks = index_scheduler
.get_task_ids_from_authorized_indexes(&rtxn, &query, &AuthFilter::default())
.unwrap();
let tasks =
index_scheduler.get_task_ids_from_authorized_indexes(&rtxn, &query, &None).unwrap();
// so running the last query should return nothing
snapshot!(snapshot_bitmap(&tasks), @"[]");

@@ -2657,9 +2448,8 @@ mod tests {
before_started_at: Some(second_start_time + Duration::minutes(1)),
..Default::default()
};
let tasks = index_scheduler
.get_task_ids_from_authorized_indexes(&rtxn, &query, &AuthFilter::default())
.unwrap();
let tasks =
index_scheduler.get_task_ids_from_authorized_indexes(&rtxn, &query, &None).unwrap();
// but the same query on failed tasks should return the last task
snapshot!(snapshot_bitmap(&tasks), @"[2,]");

@@ -2669,9 +2459,8 @@ mod tests {
before_started_at: Some(second_start_time + Duration::minutes(1)),
..Default::default()
};
let tasks = index_scheduler
.get_task_ids_from_authorized_indexes(&rtxn, &query, &AuthFilter::default())
.unwrap();
let tasks =
index_scheduler.get_task_ids_from_authorized_indexes(&rtxn, &query, &None).unwrap();
// but the same query on failed tasks should return the last task
snapshot!(snapshot_bitmap(&tasks), @"[2,]");

@@ -2682,9 +2471,8 @@ mod tests {
before_started_at: Some(second_start_time + Duration::minutes(1)),
..Default::default()
};
let tasks = index_scheduler
.get_task_ids_from_authorized_indexes(&rtxn, &query, &AuthFilter::default())
.unwrap();
let tasks =
index_scheduler.get_task_ids_from_authorized_indexes(&rtxn, &query, &None).unwrap();
// same query but with an invalid uid
snapshot!(snapshot_bitmap(&tasks), @"[]");

@@ -2695,9 +2483,8 @@ mod tests {
before_started_at: Some(second_start_time + Duration::minutes(1)),
..Default::default()
};
let tasks = index_scheduler
.get_task_ids_from_authorized_indexes(&rtxn, &query, &AuthFilter::default())
.unwrap();
let tasks =
index_scheduler.get_task_ids_from_authorized_indexes(&rtxn, &query, &None).unwrap();
// same query but with a valid uid
snapshot!(snapshot_bitmap(&tasks), @"[2,]");
}
@@ -2727,21 +2514,14 @@ mod tests {
let rtxn = index_scheduler.env.read_txn().unwrap();

let query = Query { index_uids: Some(vec!["catto".to_owned()]), ..Default::default() };
let tasks = index_scheduler
.get_task_ids_from_authorized_indexes(&rtxn, &query, &AuthFilter::default())
.unwrap();
let tasks =
index_scheduler.get_task_ids_from_authorized_indexes(&rtxn, &query, &None).unwrap();
// only the first task associated with catto is returned, the indexSwap tasks are excluded!
snapshot!(snapshot_bitmap(&tasks), @"[0,]");

let query = Query { index_uids: Some(vec!["catto".to_owned()]), ..Default::default() };
let tasks = index_scheduler
.get_task_ids_from_authorized_indexes(
&rtxn,
&query,
&AuthFilter::with_allowed_indexes(
vec![IndexUidPattern::new_unchecked("doggo")].into_iter().collect(),
),
)
.get_task_ids_from_authorized_indexes(&rtxn, &query, &Some(vec!["doggo".to_owned()]))
.unwrap();
// we have asked for only the tasks associated with catto, but are only authorized to retrieve the tasks
// associated with doggo -> empty result
@@ -2749,13 +2529,7 @@ mod tests {

let query = Query::default();
let tasks = index_scheduler
.get_task_ids_from_authorized_indexes(
&rtxn,
&query,
&AuthFilter::with_allowed_indexes(
vec![IndexUidPattern::new_unchecked("doggo")].into_iter().collect(),
),
)
.get_task_ids_from_authorized_indexes(&rtxn, &query, &Some(vec!["doggo".to_owned()]))
.unwrap();
// we asked for all the tasks, but we are only authorized to retrieve the doggo tasks
// -> only the index creation of doggo should be returned
@@ -2766,14 +2540,7 @@ mod tests {
.get_task_ids_from_authorized_indexes(
&rtxn,
&query,
&AuthFilter::with_allowed_indexes(
vec![
IndexUidPattern::new_unchecked("catto"),
IndexUidPattern::new_unchecked("doggo"),
]
.into_iter()
.collect(),
),
&Some(vec!["catto".to_owned(), "doggo".to_owned()]),
)
.unwrap();
// we asked for all the tasks, but we are only authorized to retrieve the doggo and catto tasks
@@ -2781,9 +2548,8 @@ mod tests {
snapshot!(snapshot_bitmap(&tasks), @"[0,1,]");

let query = Query::default();
let tasks = index_scheduler
.get_task_ids_from_authorized_indexes(&rtxn, &query, &AuthFilter::default())
.unwrap();
let tasks =
index_scheduler.get_task_ids_from_authorized_indexes(&rtxn, &query, &None).unwrap();
// we asked for all the tasks with all indexes authorized -> all tasks returned
snapshot!(snapshot_bitmap(&tasks), @"[0,1,2,3,]");
}
@@ -2814,22 +2580,15 @@ mod tests {

let rtxn = index_scheduler.read_txn().unwrap();
let query = Query { canceled_by: Some(vec![task_cancelation.uid]), ..Query::default() };
let tasks = index_scheduler
.get_task_ids_from_authorized_indexes(&rtxn, &query, &AuthFilter::default())
.unwrap();
let tasks =
index_scheduler.get_task_ids_from_authorized_indexes(&rtxn, &query, &None).unwrap();
// 0 is not returned because it was not canceled, 3 is not returned because it is the uid of the
// taskCancelation itself
snapshot!(snapshot_bitmap(&tasks), @"[1,2,]");

let query = Query { canceled_by: Some(vec![task_cancelation.uid]), ..Query::default() };
let tasks = index_scheduler
.get_task_ids_from_authorized_indexes(
&rtxn,
&query,
&AuthFilter::with_allowed_indexes(
vec![IndexUidPattern::new_unchecked("doggo")].into_iter().collect(),
),
)
.get_task_ids_from_authorized_indexes(&rtxn, &query, &Some(vec!["doggo".to_string()]))
.unwrap();
// Return only 1 because the user is not authorized to see task 2
snapshot!(snapshot_bitmap(&tasks), @"[1,]");

@@ -1,203 +0,0 @@
//! Thread-safe `Vec`-backed LRU cache using [`std::sync::atomic::AtomicU64`] for synchronization.

use std::sync::atomic::{AtomicU64, Ordering};

/// Thread-safe `Vec`-backed LRU cache
#[derive(Debug)]
pub struct Lru<T> {
data: Vec<(AtomicU64, T)>,
generation: AtomicU64,
cap: usize,
}

impl<T> Lru<T> {
/// Creates a new LRU cache with the specified capacity.
///
/// The capacity is allocated up-front, and will never change through a [`Self::put`] operation.
///
/// # Panics
///
/// - If the capacity is 0.
/// - If the capacity exceeds `isize::MAX` bytes.
pub fn new(cap: usize) -> Self {
assert_ne!(cap, 0, "The capacity of a cache cannot be 0");
Self {
// Note: since the element of the vector contains an AtomicU64, it is definitely not zero-sized so cap will never be usize::MAX.
data: Vec::with_capacity(cap),
generation: AtomicU64::new(0),
cap,
}
}

/// The capacity of this LRU cache, that is, the maximum number of elements it can hold before evicting elements from the cache.
///
/// The cache will contain at most this number of elements at any given time.
pub fn capacity(&self) -> usize {
self.cap
}

fn next_generation(&self) -> u64 {
// Acquire so this "happens-before" any potential store to a data cell (with Release ordering)
let generation = self.generation.fetch_add(1, Ordering::Acquire);
generation + 1
}

fn next_generation_mut(&mut self) -> u64 {
let generation = self.generation.get_mut();
*generation += 1;
*generation
}

/// Add a value in the cache, evicting an older value if necessary.
///
/// If a value was evicted from the cache, it is returned.
///
/// # Complexity
///
/// - If the cache is full, then linear in the capacity.
/// - Otherwise constant.
pub fn put(&mut self, value: T) -> Option<T> {
// no need for a memory fence: we assume that whichever mechanism provides us synchronization
// (very probably, a RwLock) takes care of fencing for us.

let next_generation = self.next_generation_mut();
let evicted = if self.is_full() { self.pop() } else { None };
self.data.push((AtomicU64::new(next_generation), value));
evicted
}

/// Evict the oldest value from the cache.
///
/// If the cache is empty, `None` will be returned.
///
/// # Complexity
///
/// - Linear in the capacity of the cache.
pub fn pop(&mut self) -> Option<T> {
// Don't use `Iterator::min_by_key`, which provides shared references to its elements,
// so that we can get an exclusive one.
// This allows handling the `AtomicU64`s as normal integers without using atomic instructions.
let mut min_generation_index = None;
for (index, (generation, _)) in self.data.iter_mut().enumerate() {
let generation = *generation.get_mut();
if let Some((_, min_generation)) = min_generation_index {
if min_generation > generation {
min_generation_index = Some((index, generation));
}
} else {
min_generation_index = Some((index, generation))
}
}
min_generation_index.map(|(min_index, _)| self.data.swap_remove(min_index).1)
}

/// The current number of elements in the cache.
///
/// This value is guaranteed to be less than or equal to [`Self::capacity`].
pub fn len(&self) -> usize {
self.data.len()
}

/// Returns `true` if putting any additional element in the cache would cause the eviction of an element.
pub fn is_full(&self) -> bool {
self.len() == self.capacity()
}
}

pub struct LruMap<K, V>(Lru<(K, V)>);

impl<K, V> LruMap<K, V>
where
K: Eq,
{
/// Creates a new LRU cache map with the specified capacity.
///
/// The capacity is allocated up-front, and will never change through a [`Self::insert`] operation.
///
/// # Panics
///
/// - If the capacity is 0.
/// - If the capacity exceeds `isize::MAX` bytes.
pub fn new(cap: usize) -> Self {
Self(Lru::new(cap))
}

/// Gets a value in the cache map by its key.
///
/// If no value matches, `None` will be returned.
///
/// # Complexity
///
/// - Linear in the capacity of the cache.
pub fn get(&self, key: &K) -> Option<&V> {
for (generation, (candidate, value)) in self.0.data.iter() {
if key == candidate {
generation.store(self.0.next_generation(), Ordering::Release);
return Some(value);
}
}
None
}

/// Gets a value in the cache map by its key.
///
/// If no value matches, `None` will be returned.
///
/// # Complexity
///
/// - Linear in the capacity of the cache.
pub fn get_mut(&mut self, key: &K) -> Option<&mut V> {
let next_generation = self.0.next_generation_mut();
for (generation, (candidate, value)) in self.0.data.iter_mut() {
if key == candidate {
*generation.get_mut() = next_generation;
return Some(value);
}
}
None
}

/// Inserts a value in the cache map by its key, replacing any existing value and returning any evicted value.
///
/// # Complexity
///
/// - Linear in the capacity of the cache.
pub fn insert(&mut self, key: K, mut value: V) -> InsertionOutcome<K, V> {
match self.get_mut(&key) {
Some(old_value) => {
std::mem::swap(old_value, &mut value);
InsertionOutcome::Replaced(value)
}
None => match self.0.put((key, value)) {
Some((key, value)) => InsertionOutcome::Evicted(key, value),
None => InsertionOutcome::InsertedNew,
},
}
}

/// Removes an element from the cache map by its key, returning its value.
///
/// Returns `None` if there was no element with this key in the cache.
///
/// # Complexity
///
/// - Linear in the capacity of the cache.
pub fn remove(&mut self, key: &K) -> Option<V> {
for (index, (_, (candidate, _))) in self.0.data.iter_mut().enumerate() {
if key == candidate {
return Some(self.0.data.swap_remove(index).1 .1);
}
}
None
}
}

/// The result of an insertion in a LRU map.
pub enum InsertionOutcome<K, V> {
/// The key was not in the cache, the key-value pair has been inserted.
InsertedNew,
/// The key was not in the cache and an old key-value pair was evicted from the cache to make room for its insertion.
Evicted(K, V),
/// The key was already in the cache map, its value has been updated.
Replaced(V),
}
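A minimal usage sketch of the `LruMap` removed above, showing the generation-based eviction order (capacity 2):

let mut cache: LruMap<&str, u32> = LruMap::new(2);
cache.insert("catto", 1);
cache.insert("doggo", 2);
cache.get(&"catto"); // bumps "catto" to the newest generation
match cache.insert("wolfo", 3) {
    // the cache was full and "doggo" now holds the oldest generation
    InsertionOutcome::Evicted(key, _) => assert_eq!(key, "doggo"),
    _ => unreachable!(),
}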
@@ -1,5 +1,6 @@
---
source: index-scheduler/src/lib.rs
assertion_line: 1755
---
### Autobatching Enabled = true
### Processing Tasks:
@@ -22,7 +23,7 @@ canceled [0,]
catto [0,]
----------------------------------------------------------------------
### Index Mapper:

[]
----------------------------------------------------------------------
### Canceled By:
1 [0,]

@@ -20,7 +20,7 @@ enqueued [0,1,]
catto [0,]
----------------------------------------------------------------------
### Index Mapper:

[]
----------------------------------------------------------------------
### Canceled By:


@@ -25,9 +25,7 @@ catto [0,]
wolfo [2,]
----------------------------------------------------------------------
### Index Mapper:
beavero: { number_of_documents: 0, field_distribution: {} }
catto: { number_of_documents: 1, field_distribution: {"id": 1} }

["beavero", "catto"]
----------------------------------------------------------------------
### Canceled By:


@@ -1,5 +1,6 @@
---
source: index-scheduler/src/lib.rs
assertion_line: 1859
---
### Autobatching Enabled = true
### Processing Tasks:
@@ -26,9 +27,7 @@ catto [0,]
wolfo [2,]
----------------------------------------------------------------------
### Index Mapper:
beavero: { number_of_documents: 0, field_distribution: {} }
catto: { number_of_documents: 1, field_distribution: {"id": 1} }

["beavero", "catto"]
----------------------------------------------------------------------
### Canceled By:
3 [1,2,]

@@ -23,8 +23,7 @@ catto [0,]
wolfo [2,]
----------------------------------------------------------------------
### Index Mapper:
catto: { number_of_documents: 1, field_distribution: {"id": 1} }

["catto"]
----------------------------------------------------------------------
### Canceled By:


@@ -25,8 +25,7 @@ catto [0,]
wolfo [2,]
----------------------------------------------------------------------
### Index Mapper:
catto: { number_of_documents: 1, field_distribution: {"id": 1} }

["catto"]
----------------------------------------------------------------------
### Canceled By:


@@ -20,8 +20,7 @@ enqueued [0,1,]
catto [0,]
----------------------------------------------------------------------
### Index Mapper:
catto: { number_of_documents: 0, field_distribution: {} }

["catto"]
----------------------------------------------------------------------
### Canceled By:


@@ -1,5 +1,6 @@
---
source: index-scheduler/src/lib.rs
assertion_line: 1818
---
### Autobatching Enabled = true
### Processing Tasks:
@@ -22,8 +23,7 @@ canceled [0,]
catto [0,]
----------------------------------------------------------------------
### Index Mapper:
catto: { number_of_documents: 0, field_distribution: {} }

["catto"]
----------------------------------------------------------------------
### Canceled By:
1 [0,]

@@ -20,7 +20,7 @@ enqueued [0,1,]
catto [0,]
----------------------------------------------------------------------
### Index Mapper:

[]
----------------------------------------------------------------------
### Canceled By:


@@ -18,7 +18,7 @@ enqueued [0,]
catto [0,]
----------------------------------------------------------------------
### Index Mapper:

[]
----------------------------------------------------------------------
### Canceled By:


@@ -18,7 +18,7 @@ enqueued [0,]
catto [0,]
----------------------------------------------------------------------
### Index Mapper:

[]
----------------------------------------------------------------------
### Canceled By:


@@ -21,8 +21,7 @@ succeeded [0,1,]
catto [0,]
----------------------------------------------------------------------
### Index Mapper:
catto: { number_of_documents: 1, field_distribution: {"id": 1} }

["catto"]
----------------------------------------------------------------------
### Canceled By:
1 []

@@ -19,8 +19,7 @@ succeeded [0,]
catto [0,]
----------------------------------------------------------------------
### Index Mapper:
catto: { number_of_documents: 1, field_distribution: {"id": 1} }

["catto"]
----------------------------------------------------------------------
### Canceled By:


@@ -18,7 +18,7 @@ enqueued [0,]
catto [0,]
----------------------------------------------------------------------
### Index Mapper:

[]
----------------------------------------------------------------------
### Canceled By:


@@ -27,10 +27,7 @@ doggos [0,3,]
girafos [2,5,]
----------------------------------------------------------------------
### Index Mapper:
cattos: { number_of_documents: 0, field_distribution: {} }
doggos: { number_of_documents: 0, field_distribution: {} }
girafos: { number_of_documents: 0, field_distribution: {} }

["cattos", "doggos", "girafos"]
----------------------------------------------------------------------
### Canceled By:


@@ -18,7 +18,7 @@ enqueued [0,]
doggos [0,]
----------------------------------------------------------------------
### Index Mapper:

[]
----------------------------------------------------------------------
### Canceled By:


@@ -18,7 +18,7 @@ enqueued [0,]
doggos [0,]
----------------------------------------------------------------------
### Index Mapper:

[]
----------------------------------------------------------------------
### Canceled By:


@@ -19,8 +19,7 @@ succeeded [0,]
doggos [0,]
----------------------------------------------------------------------
### Index Mapper:
doggos: { number_of_documents: 1, field_distribution: {"doggo": 1, "id": 1} }

["doggos"]
----------------------------------------------------------------------
### Canceled By:


@@ -1,43 +0,0 @@
---
source: index-scheduler/src/lib.rs
---
### Autobatching Enabled = true
### Processing Tasks:
[]
----------------------------------------------------------------------
### All Tasks:
0 {uid: 0, status: succeeded, details: { received_documents: 3, indexed_documents: Some(3) }, kind: DocumentAdditionOrUpdate { index_uid: "doggos", primary_key: Some("id"), method: ReplaceDocuments, content_file: 00000000-0000-0000-0000-000000000000, documents_count: 3, allow_index_creation: true }}
1 {uid: 1, status: succeeded, details: { received_document_ids: 2, deleted_documents: Some(2) }, kind: DocumentDeletion { index_uid: "doggos", documents_ids: ["1", "2"] }}
----------------------------------------------------------------------
### Status:
enqueued []
succeeded [0,1,]
----------------------------------------------------------------------
### Kind:
"documentAdditionOrUpdate" [0,]
"documentDeletion" [1,]
----------------------------------------------------------------------
### Index Tasks:
doggos [0,1,]
----------------------------------------------------------------------
### Index Mapper:
doggos: { number_of_documents: 1, field_distribution: {"doggo": 1, "id": 1} }

----------------------------------------------------------------------
### Canceled By:

----------------------------------------------------------------------
### Enqueued At:
[timestamp] [0,]
[timestamp] [1,]
----------------------------------------------------------------------
### Started At:
[timestamp] [0,1,]
----------------------------------------------------------------------
### Finished At:
[timestamp] [0,1,]
----------------------------------------------------------------------
### File Store:

----------------------------------------------------------------------

@@ -1,9 +0,0 @@
---
source: index-scheduler/src/lib.rs
---
[
{
"id": 3,
"doggo": "bork"
}
]
@@ -1,37 +0,0 @@
---
source: index-scheduler/src/lib.rs
---
### Autobatching Enabled = true
### Processing Tasks:
[]
----------------------------------------------------------------------
### All Tasks:
0 {uid: 0, status: enqueued, details: { received_documents: 3, indexed_documents: None }, kind: DocumentAdditionOrUpdate { index_uid: "doggos", primary_key: Some("id"), method: ReplaceDocuments, content_file: 00000000-0000-0000-0000-000000000000, documents_count: 3, allow_index_creation: true }}
----------------------------------------------------------------------
### Status:
enqueued [0,]
----------------------------------------------------------------------
### Kind:
"documentAdditionOrUpdate" [0,]
----------------------------------------------------------------------
### Index Tasks:
doggos [0,]
----------------------------------------------------------------------
### Index Mapper:

----------------------------------------------------------------------
### Canceled By:

----------------------------------------------------------------------
### Enqueued At:
[timestamp] [0,]
----------------------------------------------------------------------
### Started At:
----------------------------------------------------------------------
### Finished At:
----------------------------------------------------------------------
### File Store:
00000000-0000-0000-0000-000000000000

----------------------------------------------------------------------

@@ -1,40 +0,0 @@
---
source: index-scheduler/src/lib.rs
---
### Autobatching Enabled = true
### Processing Tasks:
[]
----------------------------------------------------------------------
### All Tasks:
0 {uid: 0, status: enqueued, details: { received_documents: 3, indexed_documents: None }, kind: DocumentAdditionOrUpdate { index_uid: "doggos", primary_key: Some("id"), method: ReplaceDocuments, content_file: 00000000-0000-0000-0000-000000000000, documents_count: 3, allow_index_creation: true }}
1 {uid: 1, status: enqueued, details: { received_document_ids: 2, deleted_documents: None }, kind: DocumentDeletion { index_uid: "doggos", documents_ids: ["1", "2"] }}
----------------------------------------------------------------------
### Status:
enqueued [0,1,]
----------------------------------------------------------------------
### Kind:
"documentAdditionOrUpdate" [0,]
"documentDeletion" [1,]
----------------------------------------------------------------------
### Index Tasks:
doggos [0,1,]
----------------------------------------------------------------------
### Index Mapper:

----------------------------------------------------------------------
### Canceled By:

----------------------------------------------------------------------
### Enqueued At:
[timestamp] [0,]
[timestamp] [1,]
----------------------------------------------------------------------
### Started At:
----------------------------------------------------------------------
### Finished At:
----------------------------------------------------------------------
### File Store:
00000000-0000-0000-0000-000000000000

----------------------------------------------------------------------

@@ -23,8 +23,7 @@ succeeded [0,]
doggos [0,1,2,]
----------------------------------------------------------------------
### Index Mapper:
doggos: { number_of_documents: 0, field_distribution: {} }

["doggos"]
----------------------------------------------------------------------
### Canceled By:


@@ -23,7 +23,7 @@ succeeded [0,1,2,]
doggos [0,1,2,]
----------------------------------------------------------------------
### Index Mapper:

[]
----------------------------------------------------------------------
### Canceled By:


@@ -18,7 +18,7 @@ enqueued [0,]
doggos [0,]
----------------------------------------------------------------------
### Index Mapper:

[]
----------------------------------------------------------------------
### Canceled By:


@@ -20,7 +20,7 @@ enqueued [0,1,]
doggos [0,1,]
----------------------------------------------------------------------
### Index Mapper:

[]
----------------------------------------------------------------------
### Canceled By:


@@ -22,7 +22,7 @@ enqueued [0,1,2,]
doggos [0,1,2,]
----------------------------------------------------------------------
### Index Mapper:

[]
----------------------------------------------------------------------
### Canceled By:


@@ -20,7 +20,7 @@ enqueued [0,1,]
doggos [0,1,]
----------------------------------------------------------------------
### Index Mapper:

[]
----------------------------------------------------------------------
### Canceled By:


@@ -21,7 +21,7 @@ succeeded [0,1,]
doggos [0,1,]
----------------------------------------------------------------------
### Index Mapper:

[]
----------------------------------------------------------------------
### Canceled By:


@@ -1,43 +0,0 @@
---
source: index-scheduler/src/lib.rs
---
### Autobatching Enabled = true
### Processing Tasks:
[]
----------------------------------------------------------------------
### All Tasks:
0 {uid: 0, status: failed, error: ResponseError { code: 200, message: "Index `doggos` not found.", error_code: "index_not_found", error_type: "invalid_request", error_link: "https://docs.meilisearch.com/errors#index_not_found" }, details: { received_document_ids: 2, deleted_documents: Some(0) }, kind: DocumentDeletion { index_uid: "doggos", documents_ids: ["1", "2"] }}
1 {uid: 1, status: enqueued, details: { received_documents: 3, indexed_documents: None }, kind: DocumentAdditionOrUpdate { index_uid: "doggos", primary_key: Some("id"), method: ReplaceDocuments, content_file: 00000000-0000-0000-0000-000000000000, documents_count: 3, allow_index_creation: true }}
----------------------------------------------------------------------
### Status:
enqueued [1,]
failed [0,]
----------------------------------------------------------------------
### Kind:
"documentAdditionOrUpdate" [1,]
"documentDeletion" [0,]
----------------------------------------------------------------------
### Index Tasks:
doggos [0,1,]
----------------------------------------------------------------------
### Index Mapper:

----------------------------------------------------------------------
### Canceled By:

----------------------------------------------------------------------
### Enqueued At:
[timestamp] [0,]
[timestamp] [1,]
----------------------------------------------------------------------
### Started At:
[timestamp] [0,]
----------------------------------------------------------------------
### Finished At:
[timestamp] [0,]
----------------------------------------------------------------------
### File Store:
00000000-0000-0000-0000-000000000000

----------------------------------------------------------------------

@@ -1,46 +0,0 @@
---
source: index-scheduler/src/lib.rs
---
### Autobatching Enabled = true
### Processing Tasks:
[]
----------------------------------------------------------------------
### All Tasks:
0 {uid: 0, status: failed, error: ResponseError { code: 200, message: "Index `doggos` not found.", error_code: "index_not_found", error_type: "invalid_request", error_link: "https://docs.meilisearch.com/errors#index_not_found" }, details: { received_document_ids: 2, deleted_documents: Some(0) }, kind: DocumentDeletion { index_uid: "doggos", documents_ids: ["1", "2"] }}
1 {uid: 1, status: succeeded, details: { received_documents: 3, indexed_documents: Some(3) }, kind: DocumentAdditionOrUpdate { index_uid: "doggos", primary_key: Some("id"), method: ReplaceDocuments, content_file: 00000000-0000-0000-0000-000000000000, documents_count: 3, allow_index_creation: true }}
----------------------------------------------------------------------
### Status:
enqueued []
succeeded [1,]
failed [0,]
----------------------------------------------------------------------
### Kind:
"documentAdditionOrUpdate" [1,]
"documentDeletion" [0,]
----------------------------------------------------------------------
### Index Tasks:
doggos [0,1,]
----------------------------------------------------------------------
### Index Mapper:
doggos: { number_of_documents: 3, field_distribution: {"catto": 1, "doggo": 2, "id": 3} }

----------------------------------------------------------------------
### Canceled By:

----------------------------------------------------------------------
### Enqueued At:
[timestamp] [0,]
[timestamp] [1,]
----------------------------------------------------------------------
### Started At:
[timestamp] [0,]
[timestamp] [1,]
----------------------------------------------------------------------
### Finished At:
[timestamp] [0,]
[timestamp] [1,]
----------------------------------------------------------------------
### File Store:
||||
----------------------------------------------------------------------
|
||||
|
||||
@@ -1,17 +0,0 @@
|
||||
---
|
||||
source: index-scheduler/src/lib.rs
|
||||
---
|
||||
[
|
||||
{
|
||||
"id": 1,
|
||||
"doggo": "jean bob"
|
||||
},
|
||||
{
|
||||
"id": 2,
|
||||
"catto": "jorts"
|
||||
},
|
||||
{
|
||||
"id": 3,
|
||||
"doggo": "bork"
|
||||
}
|
||||
]
|
||||
@@ -1,36 +0,0 @@
|
||||
---
|
||||
source: index-scheduler/src/lib.rs
|
||||
---
|
||||
### Autobatching Enabled = true
|
||||
### Processing Tasks:
|
||||
[]
|
||||
----------------------------------------------------------------------
|
||||
### All Tasks:
|
||||
0 {uid: 0, status: enqueued, details: { received_document_ids: 2, deleted_documents: None }, kind: DocumentDeletion { index_uid: "doggos", documents_ids: ["1", "2"] }}
|
||||
----------------------------------------------------------------------
|
||||
### Status:
|
||||
enqueued [0,]
|
||||
----------------------------------------------------------------------
|
||||
### Kind:
|
||||
"documentDeletion" [0,]
|
||||
----------------------------------------------------------------------
|
||||
### Index Tasks:
|
||||
doggos [0,]
|
||||
----------------------------------------------------------------------
|
||||
### Index Mapper:
|
||||
|
||||
----------------------------------------------------------------------
|
||||
### Canceled By:
|
||||
|
||||
----------------------------------------------------------------------
|
||||
### Enqueued At:
|
||||
[timestamp] [0,]
|
||||
----------------------------------------------------------------------
|
||||
### Started At:
|
||||
----------------------------------------------------------------------
|
||||
### Finished At:
|
||||
----------------------------------------------------------------------
|
||||
### File Store:
|
||||
|
||||
----------------------------------------------------------------------
|
||||
|
||||
@@ -1,40 +0,0 @@
|
||||
---
|
||||
source: index-scheduler/src/lib.rs
|
||||
---
|
||||
### Autobatching Enabled = true
|
||||
### Processing Tasks:
|
||||
[]
|
||||
----------------------------------------------------------------------
|
||||
### All Tasks:
|
||||
0 {uid: 0, status: enqueued, details: { received_document_ids: 2, deleted_documents: None }, kind: DocumentDeletion { index_uid: "doggos", documents_ids: ["1", "2"] }}
|
||||
1 {uid: 1, status: enqueued, details: { received_documents: 3, indexed_documents: None }, kind: DocumentAdditionOrUpdate { index_uid: "doggos", primary_key: Some("id"), method: ReplaceDocuments, content_file: 00000000-0000-0000-0000-000000000000, documents_count: 3, allow_index_creation: true }}
|
||||
----------------------------------------------------------------------
|
||||
### Status:
|
||||
enqueued [0,1,]
|
||||
----------------------------------------------------------------------
|
||||
### Kind:
|
||||
"documentAdditionOrUpdate" [1,]
|
||||
"documentDeletion" [0,]
|
||||
----------------------------------------------------------------------
|
||||
### Index Tasks:
|
||||
doggos [0,1,]
|
||||
----------------------------------------------------------------------
|
||||
### Index Mapper:
|
||||
|
||||
----------------------------------------------------------------------
|
||||
### Canceled By:
|
||||
|
||||
----------------------------------------------------------------------
|
||||
### Enqueued At:
|
||||
[timestamp] [0,]
|
||||
[timestamp] [1,]
|
||||
----------------------------------------------------------------------
|
||||
### Started At:
|
||||
----------------------------------------------------------------------
|
||||
### Finished At:
|
||||
----------------------------------------------------------------------
|
||||
### File Store:
|
||||
00000000-0000-0000-0000-000000000000
|
||||
|
||||
----------------------------------------------------------------------
|
||||
|
||||
@@ -18,7 +18,7 @@ enqueued [0,]
|
||||
doggos [0,]
|
||||
----------------------------------------------------------------------
|
||||
### Index Mapper:
|
||||
|
||||
[]
|
||||
----------------------------------------------------------------------
|
||||
### Canceled By:
|
||||
|
||||
|
||||
@@ -19,7 +19,7 @@ failed [0,]
|
||||
doggos [0,]
|
||||
----------------------------------------------------------------------
|
||||
### Index Mapper:
|
||||
|
||||
[]
|
||||
----------------------------------------------------------------------
|
||||
### Canceled By:
|
||||
|
||||
|
||||
@@ -18,7 +18,7 @@ enqueued [0,]
|
||||
doggos [0,]
|
||||
----------------------------------------------------------------------
|
||||
### Index Mapper:
|
||||
|
||||
[]
|
||||
----------------------------------------------------------------------
|
||||
### Canceled By:
|
||||
|
||||
|
||||
@@ -18,7 +18,7 @@ enqueued [0,]
|
||||
catto [0,]
|
||||
----------------------------------------------------------------------
|
||||
### Index Mapper:
|
||||
|
||||
[]
|
||||
----------------------------------------------------------------------
|
||||
### Canceled By:
|
||||
|
||||
|
||||
@@ -19,7 +19,7 @@ failed [0,]
|
||||
catto [0,]
|
||||
----------------------------------------------------------------------
|
||||
### Index Mapper:
|
||||
|
||||
[]
|
||||
----------------------------------------------------------------------
|
||||
### Canceled By:
|
||||
|
||||
|
||||
@@ -18,8 +18,7 @@ enqueued [0,]
|
||||
doggos [0,]
|
||||
----------------------------------------------------------------------
|
||||
### Index Mapper:
|
||||
doggos: { number_of_documents: 1, field_distribution: {"doggo": 1, "id": 1} }
|
||||
|
||||
["doggos"]
|
||||
----------------------------------------------------------------------
|
||||
### Canceled By:
|
||||
|
||||
|
||||
@@ -18,8 +18,7 @@ enqueued [0,]
|
||||
doggos [0,]
|
||||
----------------------------------------------------------------------
|
||||
### Index Mapper:
|
||||
doggos: { number_of_documents: 1, field_distribution: {"doggo": 1, "id": 1} }
|
||||
|
||||
["doggos"]
|
||||
----------------------------------------------------------------------
|
||||
### Canceled By:
|
||||
|
||||
|
||||
@@ -18,7 +18,7 @@ enqueued [0,]
|
||||
doggos [0,]
|
||||
----------------------------------------------------------------------
|
||||
### Index Mapper:
|
||||
|
||||
[]
|
||||
----------------------------------------------------------------------
|
||||
### Canceled By:
|
||||
|
||||
|
||||
@@ -18,7 +18,7 @@ enqueued [0,]
|
||||
doggos [0,]
|
||||
----------------------------------------------------------------------
|
||||
### Index Mapper:
|
||||
|
||||
[]
|
||||
----------------------------------------------------------------------
|
||||
### Canceled By:
|
||||
|
||||
|
||||
@@ -19,8 +19,7 @@ succeeded [0,]
|
||||
doggos [0,]
|
||||
----------------------------------------------------------------------
|
||||
### Index Mapper:
|
||||
doggos: { number_of_documents: 1, field_distribution: {"doggo": 1, "id": 1} }
|
||||
|
||||
["doggos"]
|
||||
----------------------------------------------------------------------
|
||||
### Canceled By:
|
||||
|
||||
|
||||
@@ -18,7 +18,7 @@ enqueued [0,]
|
||||
index_a [0,]
|
||||
----------------------------------------------------------------------
|
||||
### Index Mapper:
|
||||
|
||||
[]
|
||||
----------------------------------------------------------------------
|
||||
### Canceled By:
|
||||
|
||||
|
||||
@@ -18,7 +18,7 @@ enqueued [0,]
|
||||
index_a [0,]
|
||||
----------------------------------------------------------------------
|
||||
### Index Mapper:
|
||||
|
||||
[]
|
||||
----------------------------------------------------------------------
|
||||
### Canceled By:
|
||||
|
||||
|
||||
@@ -20,7 +20,7 @@ index_a [0,]
|
||||
index_b [1,]
|
||||
----------------------------------------------------------------------
|
||||
### Index Mapper:
|
||||
|
||||
[]
|
||||
----------------------------------------------------------------------
|
||||
### Canceled By:
|
||||
|
||||
|
||||
@@ -22,7 +22,7 @@ index_a [0,2,]
|
||||
index_b [1,]
|
||||
----------------------------------------------------------------------
|
||||
### Index Mapper:
|
||||
|
||||
[]
|
||||
----------------------------------------------------------------------
|
||||
### Canceled By:
|
||||
|
||||
|
||||
@@ -19,7 +19,7 @@ failed [0,]
|
||||
catto [0,]
|
||||
----------------------------------------------------------------------
|
||||
### Index Mapper:
|
||||
|
||||
[]
|
||||
----------------------------------------------------------------------
|
||||
### Canceled By:
|
||||
|
||||
|
||||
@@ -18,7 +18,7 @@ enqueued [0,]
|
||||
catto [0,]
|
||||
----------------------------------------------------------------------
|
||||
### Index Mapper:
|
||||
|
||||
[]
|
||||
----------------------------------------------------------------------
|
||||
### Canceled By:
|
||||
|
||||
|
||||
@@ -23,8 +23,7 @@ cattos [1,]
|
||||
doggos [0,2,]
|
||||
----------------------------------------------------------------------
|
||||
### Index Mapper:
|
||||
doggos: { number_of_documents: 0, field_distribution: {} }
|
||||
|
||||
["doggos"]
|
||||
----------------------------------------------------------------------
|
||||
### Canceled By:
|
||||
|
||||
|
||||
@@ -23,9 +23,7 @@ cattos [1,]
|
||||
doggos [0,2,]
|
||||
----------------------------------------------------------------------
|
||||
### Index Mapper:
|
||||
cattos: { number_of_documents: 0, field_distribution: {} }
|
||||
doggos: { number_of_documents: 0, field_distribution: {} }
|
||||
|
||||
["cattos", "doggos"]
|
||||
----------------------------------------------------------------------
|
||||
### Canceled By:
|
||||
|
||||
|
||||
@@ -23,8 +23,7 @@ cattos [1,]
|
||||
doggos [0,2,]
|
||||
----------------------------------------------------------------------
|
||||
### Index Mapper:
|
||||
cattos: { number_of_documents: 0, field_distribution: {} }
|
||||
|
||||
["cattos"]
|
||||
----------------------------------------------------------------------
|
||||
### Canceled By:
|
||||
|
||||
|
||||
@@ -18,7 +18,7 @@ enqueued [0,]
|
||||
doggos [0,]
|
||||
----------------------------------------------------------------------
|
||||
### Index Mapper:
|
||||
|
||||
[]
|
||||
----------------------------------------------------------------------
|
||||
### Canceled By:
|
||||
|
||||
|
||||
@@ -20,7 +20,7 @@ cattos [1,]
|
||||
doggos [0,]
|
||||
----------------------------------------------------------------------
|
||||
### Index Mapper:
|
||||
|
||||
[]
|
||||
----------------------------------------------------------------------
|
||||
### Canceled By:
|
||||
|
||||
|
||||
@@ -22,7 +22,7 @@ cattos [1,]
|
||||
doggos [0,2,]
|
||||
----------------------------------------------------------------------
|
||||
### Index Mapper:
|
||||
|
||||
[]
|
||||
----------------------------------------------------------------------
|
||||
### Canceled By:
|
||||
|
||||
|
||||
@@ -23,8 +23,7 @@ succeeded [0,]
|
||||
doggos [0,1,2,3,]
|
||||
----------------------------------------------------------------------
|
||||
### Index Mapper:
|
||||
doggos: { number_of_documents: 0, field_distribution: {} }
|
||||
|
||||
["doggos"]
|
||||
----------------------------------------------------------------------
|
||||
### Canceled By:
|
||||
|
||||
|
||||
@@ -23,8 +23,7 @@ succeeded [0,1,2,3,]
|
||||
doggos [0,1,2,3,]
|
||||
----------------------------------------------------------------------
|
||||
### Index Mapper:
|
||||
doggos: { number_of_documents: 0, field_distribution: {} }
|
||||
|
||||
["doggos"]
|
||||
----------------------------------------------------------------------
|
||||
### Canceled By:
|
||||
|
||||
|
||||
@@ -18,7 +18,7 @@ enqueued [0,]
|
||||
doggos [0,]
|
||||
----------------------------------------------------------------------
|
||||
### Index Mapper:
|
||||
|
||||
[]
|
||||
----------------------------------------------------------------------
|
||||
### Canceled By:
|
||||
|
||||
|
||||
@@ -22,7 +22,7 @@ enqueued [0,1,2,3,]
|
||||
doggos [0,1,2,3,]
|
||||
----------------------------------------------------------------------
|
||||
### Index Mapper:
|
||||
|
||||
[]
|
||||
----------------------------------------------------------------------
|
||||
### Canceled By:
|
||||
|
||||
|
||||
@@ -20,7 +20,7 @@ enqueued [0,1,]
|
||||
doggos [0,1,]
|
||||
----------------------------------------------------------------------
|
||||
### Index Mapper:
|
||||
|
||||
[]
|
||||
----------------------------------------------------------------------
|
||||
### Canceled By:
|
||||
|
||||
|
||||
@@ -21,7 +21,7 @@ enqueued [0,1,2,]
|
||||
doggos [0,1,2,]
|
||||
----------------------------------------------------------------------
|
||||
### Index Mapper:
|
||||
|
||||
[]
|
||||
----------------------------------------------------------------------
|
||||
### Canceled By:
|
||||
|
||||
|
||||
@@ -23,8 +23,7 @@ succeeded [0,1,]
|
||||
doggos [0,1,2,3,]
|
||||
----------------------------------------------------------------------
|
||||
### Index Mapper:
|
||||
doggos: { number_of_documents: 0, field_distribution: {} }
|
||||
|
||||
["doggos"]
|
||||
----------------------------------------------------------------------
|
||||
### Canceled By:
|
||||
|
||||
|
||||
@@ -23,8 +23,7 @@ succeeded [0,1,2,]
|
||||
doggos [0,1,2,3,]
|
||||
----------------------------------------------------------------------
|
||||
### Index Mapper:
|
||||
doggos: { number_of_documents: 0, field_distribution: {} }
|
||||
|
||||
["doggos"]
|
||||
----------------------------------------------------------------------
|
||||
### Canceled By:
|
||||
|
||||
|
||||
@@ -26,8 +26,7 @@ catto [0,2,]
|
||||
doggo [1,2,]
|
||||
----------------------------------------------------------------------
|
||||
### Index Mapper:
|
||||
catto: { number_of_documents: 0, field_distribution: {} }
|
||||
|
||||
["catto"]
|
||||
----------------------------------------------------------------------
|
||||
### Canceled By:
|
||||
3 [1,2,]
|
||||
|
||||
@@ -23,10 +23,7 @@ doggo [0,]
|
||||
whalo [1,]
|
||||
----------------------------------------------------------------------
|
||||
### Index Mapper:
|
||||
catto: { number_of_documents: 0, field_distribution: {} }
|
||||
doggo: { number_of_documents: 0, field_distribution: {} }
|
||||
whalo: { number_of_documents: 0, field_distribution: {} }
|
||||
|
||||
["catto", "doggo", "whalo"]
|
||||
----------------------------------------------------------------------
|
||||
### Canceled By:
|
||||
|
||||
|
||||
Some files were not shown because too many files have changed in this diff.