Mirror of https://github.com/meilisearch/meilisearch.git (synced 2025-11-23 13:16:33 +00:00)

Compare commits: tmp_no_rr_ ... index-stat (69 commits)
| Author | SHA1 | Date |
|---|---|---|
| | a41c0ba755 | |
| | ef9875256b | |
| | 040b5a5b6f | |
| | 530a3e2df3 | |
| | 28404d56b7 | |
| | 262c1f2baf | |
| | cfed349aa3 | |
| | bbc9f68ff5 | |
| | 45636d315c | |
| | cb9d78fc7f | |
| | 01d2ee5cc1 | |
| | e0c4682758 | |
| | d9b4b39922 | |
| | 2da86b31a6 | |
| | 4e81445d42 | |
| | 4829348d6e | |
| | 047d22fcb1 | |
| | a2a3b8c973 | |
| | 9f37b61666 | |
| | c15c076da9 | |
| | 9dcf1da59d | |
| | 8628a0c856 | |
| | c1e3cc04b0 | |
| | d96d8bb0dd | |
| | 4a3405afec | |
| | 3cfd653db1 | |
| | b6b6a80b76 | |
| | f3e2f79290 | |
| | f517274d1f | |
| | 3f41bc642a | |
| | 672abdb341 | |
| | a13ed4d0b0 | |
| | 4cc2988482 | |
| | 26c7e31f25 | |
| | b2dee07b5e | |
| | d963b5f85a | |
| | 2acc3ec5ee | |
| | da04edff8c | |
| | 85a80f4f4c | |
| | 1213ec7164 | |
| | f03d99690d | |
| | 23a5b45ebf | |
| | 46fa99f486 | |
| | 67a583bedf | |
| | 99e9057684 | |
| | 8d40d300a5 | |
| | 6c6387d05e | |
| | 51dce9e9d1 | |
| | c9b65677bf | |
| | 35d5556f1f | |
| | c433bdd1cd | |
| | 2db09725f8 | |
| | fdb23132d4 | |
| | 11b95284cd | |
| | 1b601f70c6 | |
| | 8185731bbf | |
| | 840727d76f | |
| | ead07d0b9d | |
| | 44f231d41e | |
| | 3c5d1c93de | |
| | 57d53de402 | |
| | 002f42875f | |
| | 22213dc604 | |
| | 602ad98cb8 | |
| | 7f619ff0e4 | |
| | 4391cba6ca | |
| | d7ddf4925e | |
| | 918ce1dd67 | |
| | 8095f21999 | |
@@ -2,4 +2,3 @@ target
 Dockerfile
 .dockerignore
 .gitignore
-**/.git
.github/scripts/check-release.sh (vendored, 47 changed lines)

@@ -1,24 +1,41 @@
-#!/bin/bash
+#!/usr/bin/env bash
+set -eu -o pipefail

-# check_tag $current_tag $file_tag $file_name
-function check_tag {
-  if [[ "$1" != "$2" ]]; then
-      echo "Error: the current tag does not match the version in Cargo.toml: found $2 - expected $1"
-      ret=1
-  fi
+check_tag() {
+  local expected=$1
+  local actual=$2
+  local filename=$3
+
+  if [[ $actual != $expected ]]; then
+    echo >&2 "Error: the current tag does not match the version in $filename: found $actual, expected $expected"
+    return 1
+  fi
 }

+read_version() {
+  grep '^version = ' | cut -d \" -f 2
+}
+
+if [[ -z "${GITHUB_REF:-}" ]]; then
+  echo >&2 "Error: GITHUB_REF is not set"
+  exit 1
+fi
+
+if [[ ! "$GITHUB_REF" =~ ^refs/tags/v[0-9]+\.[0-9]+\.[0-9]+(-[a-z0-9]+)?$ ]]; then
+  echo >&2 "Error: GITHUB_REF is not a valid tag: $GITHUB_REF"
+  exit 1
+fi
+
+current_tag=${GITHUB_REF#refs/tags/v}
 ret=0
-current_tag=${GITHUB_REF#'refs/tags/v'}

-file_tag="$(grep '^version = ' Cargo.toml | cut -d '=' -f 2 | tr -d '"' | tr -d ' ')"
-check_tag $current_tag $file_tag
+toml_tag="$(cat Cargo.toml | read_version)"
+check_tag "$current_tag" "$toml_tag" Cargo.toml || ret=1

-lock_file='Cargo.lock'
-lock_tag=$(grep -A 1 'name = "meilisearch-auth"' $lock_file | grep version | cut -d '=' -f 2 | tr -d '"' | tr -d ' ')
-check_tag $current_tag $lock_tag $lock_file
+lock_tag=$(grep -A 1 '^name = "meilisearch-auth"' Cargo.lock | read_version)
+check_tag "$current_tag" "$lock_tag" Cargo.lock || ret=1

-if [[ "$ret" -eq 0 ]] ; then
+if (( ret == 0 )); then
   echo 'OK'
 fi
 exit $ret
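For a local dry run of this check outside CI, exporting GITHUB_REF is enough, since the new script reads only that variable plus the two manifest files; something like `GITHUB_REF=refs/tags/v1.2.3 bash .github/scripts/check-release.sh` (with a hypothetical tag value) now exits non-zero when the tag and the Cargo versions disagree, instead of only printing the error as the old version did.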
.github/workflows/fuzzer-indexing.yml (vendored, new file, 24 lines)

name: Run the indexing fuzzer

on:
  push:
    branches:
      - main

jobs:
  fuzz:
    name: Setup the action
    runs-on: ubuntu-latest
    timeout-minutes: 4320 # 72h
    steps:
      - uses: actions/checkout@v3
      - uses: actions-rs/toolchain@v1
        with:
          profile: minimal
          toolchain: stable
          override: true

      # Run benchmarks
      - name: Run the fuzzer
        run: |
          cargo run --release --bin fuzz-indexing
.github/workflows/publish-apt-brew-pkg.yml (vendored, 2 changed lines)

@@ -35,7 +35,7 @@ jobs:
       - name: Build deb package
         run: cargo deb -p meilisearch -o target/debian/meilisearch.deb
       - name: Upload debian pkg to release
-        uses: svenstaro/upload-release-action@2.5.0
+        uses: svenstaro/upload-release-action@2.6.1
         with:
           repo_token: ${{ secrets.MEILI_BOT_GH_PAT }}
           file: target/debian/meilisearch.deb
.github/workflows/publish-binaries.yml (vendored, 8 changed lines)

@@ -54,7 +54,7 @@ jobs:
       # No need to upload binaries for dry run (cron)
       - name: Upload binaries to release
         if: github.event_name == 'release'
-        uses: svenstaro/upload-release-action@2.5.0
+        uses: svenstaro/upload-release-action@2.6.1
         with:
           repo_token: ${{ secrets.MEILI_BOT_GH_PAT }}
           file: target/release/meilisearch
@@ -87,7 +87,7 @@ jobs:
       # No need to upload binaries for dry run (cron)
       - name: Upload binaries to release
         if: github.event_name == 'release'
-        uses: svenstaro/upload-release-action@2.5.0
+        uses: svenstaro/upload-release-action@2.6.1
         with:
           repo_token: ${{ secrets.MEILI_BOT_GH_PAT }}
           file: target/release/${{ matrix.artifact_name }}
@@ -121,7 +121,7 @@ jobs:
       - name: Upload the binary to release
         # No need to upload binaries for dry run (cron)
         if: github.event_name == 'release'
-        uses: svenstaro/upload-release-action@2.5.0
+        uses: svenstaro/upload-release-action@2.6.1
         with:
           repo_token: ${{ secrets.MEILI_BOT_GH_PAT }}
           file: target/${{ matrix.target }}/release/meilisearch
@@ -183,7 +183,7 @@ jobs:
       - name: Upload the binary to release
         # No need to upload binaries for dry run (cron)
         if: github.event_name == 'release'
-        uses: svenstaro/upload-release-action@2.5.0
+        uses: svenstaro/upload-release-action@2.6.1
         with:
           repo_token: ${{ secrets.MEILI_BOT_GH_PAT }}
           file: target/${{ matrix.target }}/release/meilisearch
.github/workflows/publish-docker-images.yml (vendored, 7 changed lines)

@@ -58,13 +58,9 @@ jobs:

       - name: Set up QEMU
         uses: docker/setup-qemu-action@v2
-        with:
-          platforms: linux/amd64,linux/arm64

       - name: Set up Docker Buildx
         uses: docker/setup-buildx-action@v2
-        with:
-          platforms: linux/amd64,linux/arm64

       - name: Login to Docker Hub
         uses: docker/login-action@v2
@@ -92,13 +88,10 @@ jobs:
           push: true
           platforms: linux/amd64,linux/arm64
           tags: ${{ steps.meta.outputs.tags }}
-          builder: ${{ steps.buildx.outputs.name }}
           build-args: |
             COMMIT_SHA=${{ github.sha }}
             COMMIT_DATE=${{ steps.build-metadata.outputs.date }}
             GIT_TAG=${{ github.ref_name }}
-          cache-from: type=gha
-          cache-to: type=gha,mode=max

       # /!\ Don't touch this without checking with Cloud team
       - name: Send CI information to Cloud team
.github/workflows/sdks-tests.yml (vendored, 42 changed lines)

@@ -3,6 +3,11 @@ name: SDKs tests

 on:
   workflow_dispatch:
+    inputs:
+      docker_image:
+        description: 'The Meilisearch Docker image used'
+        required: false
+        default: nightly
   schedule:
     - cron: "0 6 * * MON" # Every Monday at 6:00AM

@@ -11,13 +16,28 @@ env:
   MEILI_NO_ANALYTICS: 'true'

 jobs:
+  define-docker-image:
+    runs-on: ubuntu-latest
+    outputs:
+      docker-image: ${{ steps.define-image.outputs.docker-image }}
+    steps:
+      - uses: actions/checkout@v3
+      - name: Define the Docker image we need to use
+        id: define-image
+        run: |
+          event=${{ github.event_name }}
+          echo "docker-image=nightly" >> $GITHUB_OUTPUT
+          if [[ $event == 'workflow_dispatch' ]]; then
+            echo "docker-image=${{ github.event.inputs.docker_image }}" >> $GITHUB_OUTPUT
+          fi

   meilisearch-js-tests:
+    needs: define-docker-image
     name: JS SDK tests
     runs-on: ubuntu-latest
     services:
       meilisearch:
-        image: getmeili/meilisearch:nightly
+        image: getmeili/meilisearch:${{ needs.define-docker-image.outputs.docker-image }}
         env:
           MEILI_MASTER_KEY: ${{ env.MEILI_MASTER_KEY }}
           MEILI_NO_ANALYTICS: ${{ env.MEILI_NO_ANALYTICS }}
@@ -47,11 +67,12 @@ jobs:
         run: yarn test:env:browser

   instant-meilisearch-tests:
+    needs: define-docker-image
     name: instant-meilisearch tests
     runs-on: ubuntu-latest
     services:
       meilisearch:
-        image: getmeili/meilisearch:nightly
+        image: getmeili/meilisearch:${{ needs.define-docker-image.outputs.docker-image }}
         env:
           MEILI_MASTER_KEY: ${{ env.MEILI_MASTER_KEY }}
           MEILI_NO_ANALYTICS: ${{ env.MEILI_NO_ANALYTICS }}
@@ -73,11 +94,12 @@ jobs:
         run: yarn build

   meilisearch-php-tests:
+    needs: define-docker-image
     name: PHP SDK tests
     runs-on: ubuntu-latest
     services:
       meilisearch:
-        image: getmeili/meilisearch:nightly
+        image: getmeili/meilisearch:${{ needs.define-docker-image.outputs.docker-image }}
         env:
           MEILI_MASTER_KEY: ${{ env.MEILI_MASTER_KEY }}
           MEILI_NO_ANALYTICS: ${{ env.MEILI_NO_ANALYTICS }}
@@ -103,11 +125,12 @@ jobs:
           composer remove --dev guzzlehttp/guzzle http-interop/http-factory-guzzle

   meilisearch-python-tests:
+    needs: define-docker-image
     name: Python SDK tests
     runs-on: ubuntu-latest
     services:
       meilisearch:
-        image: getmeili/meilisearch:nightly
+        image: getmeili/meilisearch:${{ needs.define-docker-image.outputs.docker-image }}
         env:
           MEILI_MASTER_KEY: ${{ env.MEILI_MASTER_KEY }}
           MEILI_NO_ANALYTICS: ${{ env.MEILI_NO_ANALYTICS }}
@@ -127,11 +150,12 @@ jobs:
         run: pipenv run pytest

   meilisearch-go-tests:
+    needs: define-docker-image
     name: Go SDK tests
     runs-on: ubuntu-latest
     services:
       meilisearch:
-        image: getmeili/meilisearch:nightly
+        image: getmeili/meilisearch:${{ needs.define-docker-image.outputs.docker-image }}
         env:
           MEILI_MASTER_KEY: ${{ env.MEILI_MASTER_KEY }}
           MEILI_NO_ANALYTICS: ${{ env.MEILI_NO_ANALYTICS }}
@@ -139,7 +163,7 @@ jobs:
          - '7700:7700'
     steps:
       - name: Set up Go
-        uses: actions/setup-go@v3
+        uses: actions/setup-go@v4
         with:
           go-version: stable
       - uses: actions/checkout@v3
@@ -156,11 +180,12 @@ jobs:
         run: go test -v ./...

   meilisearch-ruby-tests:
+    needs: define-docker-image
     name: Ruby SDK tests
     runs-on: ubuntu-latest
     services:
       meilisearch:
-        image: getmeili/meilisearch:nightly
+        image: getmeili/meilisearch:${{ needs.define-docker-image.outputs.docker-image }}
         env:
           MEILI_MASTER_KEY: ${{ env.MEILI_MASTER_KEY }}
           MEILI_NO_ANALYTICS: ${{ env.MEILI_NO_ANALYTICS }}
@@ -180,11 +205,12 @@ jobs:
         run: bundle exec rspec

   meilisearch-rust-tests:
+    needs: define-docker-image
     name: Rust SDK tests
     runs-on: ubuntu-latest
     services:
       meilisearch:
-        image: getmeili/meilisearch:nightly
+        image: getmeili/meilisearch:${{ needs.define-docker-image.outputs.docker-image }}
         env:
           MEILI_MASTER_KEY: ${{ env.MEILI_MASTER_KEY }}
           MEILI_NO_ANALYTICS: ${{ env.MEILI_NO_ANALYTICS }}
.github/workflows/test-suite.yml (vendored, 33 changed lines)

@@ -43,7 +43,7 @@ jobs:
           toolchain: nightly
           override: true
       - name: Cache dependencies
-        uses: Swatinem/rust-cache@v2.2.1
+        uses: Swatinem/rust-cache@v2.4.0
       - name: Run cargo check without any default features
         uses: actions-rs/cargo@v1
         with:
@@ -65,7 +65,7 @@ jobs:
     steps:
       - uses: actions/checkout@v3
       - name: Cache dependencies
-        uses: Swatinem/rust-cache@v2.2.1
+        uses: Swatinem/rust-cache@v2.4.0
       - name: Run cargo check without any default features
         uses: actions-rs/cargo@v1
         with:
@@ -105,6 +105,29 @@ jobs:
           command: test
           args: --workspace --locked --release --all-features

+  test-disabled-tokenization:
+    name: Test disabled tokenization
+    runs-on: ubuntu-latest
+    container:
+      image: ubuntu:18.04
+    if: github.event_name == 'schedule'
+    steps:
+      - uses: actions/checkout@v3
+      - name: Install needed dependencies
+        run: |
+          apt-get update
+          apt-get install --assume-yes build-essential curl
+      - uses: actions-rs/toolchain@v1
+        with:
+          toolchain: stable
+          override: true
+      - name: Run cargo tree without default features and check lindera is not present
+        run: |
+          cargo tree -f '{p} {f}' -e normal --no-default-features | grep lindera -vqz
+      - name: Run cargo tree with default features and check lindera is pressent
+        run: |
+          cargo tree -f '{p} {f}' -e normal | grep lindera -qz
+
   # We run tests in debug also, to make sure that the debug_assertions are hit
   test-debug:
     name: Run tests in debug
@@ -123,7 +146,7 @@ jobs:
           toolchain: stable
           override: true
       - name: Cache dependencies
-        uses: Swatinem/rust-cache@v2.2.1
+        uses: Swatinem/rust-cache@v2.4.0
       - name: Run tests in debug
         uses: actions-rs/cargo@v1
         with:
@@ -142,7 +165,7 @@ jobs:
           override: true
           components: clippy
       - name: Cache dependencies
-        uses: Swatinem/rust-cache@v2.2.1
+        uses: Swatinem/rust-cache@v2.4.0
       - name: Run cargo clippy
         uses: actions-rs/cargo@v1
         with:
@@ -161,7 +184,7 @@ jobs:
           override: true
           components: rustfmt
       - name: Cache dependencies
-        uses: Swatinem/rust-cache@v2.2.1
+        uses: Swatinem/rust-cache@v2.4.0
       - name: Run cargo fmt
         # Since we never ran the `build.rs` script in the benchmark directory we are missing one auto-generated import file.
         # Since we want to trigger (and fail) this action as fast as possible, instead of building the benchmark crate
Cargo.lock (generated, 756 changed lines): diff suppressed because it is too large
Cargo.toml

@@ -10,10 +10,12 @@ members = [
     "file-store",
     "permissive-json-pointer",
     "milli",
+    "index-stats",
     "filter-parser",
     "flatten-serde-json",
     "json-depth-checker",
-    "benchmarks"
+    "benchmarks",
+    "fuzzers",
 ]

 [workspace.package]
Dockerfile

@@ -1,4 +1,3 @@
-# syntax=docker/dockerfile:1.4
 # Compile
 FROM rust:alpine3.16 AS compiler

@@ -12,7 +11,7 @@ ARG GIT_TAG
 ENV VERGEN_GIT_SHA=${COMMIT_SHA} VERGEN_GIT_COMMIT_TIMESTAMP=${COMMIT_DATE} VERGEN_GIT_SEMVER_LIGHTWEIGHT=${GIT_TAG}
 ENV RUSTFLAGS="-C target-feature=-crt-static"

-COPY --link . .
+COPY . .
 RUN set -eux; \
     apkArch="$(apk --print-arch)"; \
     if [ "$apkArch" = "aarch64" ]; then \
@@ -31,7 +30,7 @@ RUN apk update --quiet \

 # add meilisearch to the `/bin` so you can run it from anywhere and it's easy
 # to find.
-COPY --from=compiler --link /meilisearch/target/release/meilisearch /bin/meilisearch
+COPY --from=compiler /meilisearch/target/release/meilisearch /bin/meilisearch
 # To stay compatible with the older version of the container (pre v0.27.0) we're
 # going to symlink the meilisearch binary in the path to `/meilisearch`
 RUN ln -s /bin/meilisearch /meilisearch
assets/grafana-dashboard.json (new file, 1376 lines): diff suppressed because it is too large
assets/prometheus-basic-scraper.yml (new file, 19 lines)

global:
  scrape_interval: 15s # By default, scrape targets every 15 seconds.

  # Attach these labels to any time series or alerts when communicating with
  # external systems (federation, remote storage, Alertmanager).
  external_labels:
    monitor: 'codelab-monitor'

# A scrape configuration containing exactly one endpoint to scrape:
# Here it's Prometheus itself.
scrape_configs:
  # The job name is added as a label `job=<job_name>` to any timeseries scraped from this config.
  - job_name: 'meilisearch'

    # Override the global default and scrape targets from this job every 5 seconds.
    scrape_interval: 5s

    static_configs:
      - targets: ['localhost:7700']
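Assuming a local Prometheus install, this scraper can be started with something like `prometheus --config.file=assets/prometheus-basic-scraper.yml`; the `localhost:7700` target it polls is the Meilisearch `GET /metrics` endpoint, which is only exposed when `experimental_enable_metrics` is turned on (see config.toml below), and the resulting series are presumably what the Grafana dashboard added above visualizes.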
config.toml (54 changed lines)

@@ -1,131 +1,131 @@
 # This file shows the default configuration of Meilisearch.
 # All variables are defined here: https://www.meilisearch.com/docs/learn/configuration/instance_options#environment-variables

-db_path = "./data.ms"
 # Designates the location where database files will be created and retrieved.
 # https://www.meilisearch.com/docs/learn/configuration/instance_options#database-path
+db_path = "./data.ms"

-env = "development"
 # Configures the instance's environment. Value must be either `production` or `development`.
 # https://www.meilisearch.com/docs/learn/configuration/instance_options#environment
+env = "development"

-http_addr = "localhost:7700"
 # The address on which the HTTP server will listen.
+http_addr = "localhost:7700"

-# master_key = "YOUR_MASTER_KEY_VALUE"
 # Sets the instance's master key, automatically protecting all routes except GET /health.
 # https://www.meilisearch.com/docs/learn/configuration/instance_options#master-key
+# master_key = "YOUR_MASTER_KEY_VALUE"

-# no_analytics = true
 # Deactivates Meilisearch's built-in telemetry when provided.
 # Meilisearch automatically collects data from all instances that do not opt out using this flag.
 # All gathered data is used solely for the purpose of improving Meilisearch, and can be deleted at any time.
 # https://www.meilisearch.com/docs/learn/configuration/instance_options#disable-analytics
+# no_analytics = true

-http_payload_size_limit = "100 MB"
 # Sets the maximum size of accepted payloads.
 # https://www.meilisearch.com/docs/learn/configuration/instance_options#payload-limit-size
+http_payload_size_limit = "100 MB"

-log_level = "INFO"
 # Defines how much detail should be present in Meilisearch's logs.
 # Meilisearch currently supports six log levels, listed in order of increasing verbosity: `OFF`, `ERROR`, `WARN`, `INFO`, `DEBUG`, `TRACE`
 # https://www.meilisearch.com/docs/learn/configuration/instance_options#log-level
+log_level = "INFO"

-# max_indexing_memory = "2 GiB"
 # Sets the maximum amount of RAM Meilisearch can use when indexing.
 # https://www.meilisearch.com/docs/learn/configuration/instance_options#max-indexing-memory
+# max_indexing_memory = "2 GiB"

-# max_indexing_threads = 4
 # Sets the maximum number of threads Meilisearch can use during indexing.
 # https://www.meilisearch.com/docs/learn/configuration/instance_options#max-indexing-threads
+# max_indexing_threads = 4

 #############
 ### DUMPS ###
 #############

-dump_dir = "dumps/"
 # Sets the directory where Meilisearch will create dump files.
 # https://www.meilisearch.com/docs/learn/configuration/instance_options#dump-directory
+dump_dir = "dumps/"

-# import_dump = "./path/to/my/file.dump"
 # Imports the dump file located at the specified path. Path must point to a .dump file.
 # https://www.meilisearch.com/docs/learn/configuration/instance_options#import-dump
+# import_dump = "./path/to/my/file.dump"

-ignore_missing_dump = false
 # Prevents Meilisearch from throwing an error when `import_dump` does not point to a valid dump file.
 # https://www.meilisearch.com/docs/learn/configuration/instance_options#ignore-missing-dump
+ignore_missing_dump = false

-ignore_dump_if_db_exists = false
 # Prevents a Meilisearch instance with an existing database from throwing an error when using `import_dump`.
 # https://www.meilisearch.com/docs/learn/configuration/instance_options#ignore-dump-if-db-exists
+ignore_dump_if_db_exists = false


 #################
 ### SNAPSHOTS ###
 #################

-schedule_snapshot = false
 # Enables scheduled snapshots when true, disable when false (the default).
 # If the value is given as an integer, then enables the scheduled snapshot with the passed value as the interval
 # between each snapshot, in seconds.
 # https://www.meilisearch.com/docs/learn/configuration/instance_options#schedule-snapshot-creation
+schedule_snapshot = false

-snapshot_dir = "snapshots/"
 # Sets the directory where Meilisearch will store snapshots.
 # https://www.meilisearch.com/docs/learn/configuration/instance_options#snapshot-destination
+snapshot_dir = "snapshots/"

-# import_snapshot = "./path/to/my/snapshot"
 # Launches Meilisearch after importing a previously-generated snapshot at the given filepath.
 # https://www.meilisearch.com/docs/learn/configuration/instance_options#import-snapshot
+# import_snapshot = "./path/to/my/snapshot"

-ignore_missing_snapshot = false
 # Prevents a Meilisearch instance from throwing an error when `import_snapshot` does not point to a valid snapshot file.
 # https://www.meilisearch.com/docs/learn/configuration/instance_options#ignore-missing-snapshot
+ignore_missing_snapshot = false

-ignore_snapshot_if_db_exists = false
 # Prevents a Meilisearch instance with an existing database from throwing an error when using `import_snapshot`.
 # https://www.meilisearch.com/docs/learn/configuration/instance_options#ignore-snapshot-if-db-exists
+ignore_snapshot_if_db_exists = false


 ###########
 ### SSL ###
 ###########

-# ssl_auth_path = "./path/to/root"
 # Enables client authentication in the specified path.
 # https://www.meilisearch.com/docs/learn/configuration/instance_options#ssl-authentication-path
+# ssl_auth_path = "./path/to/root"

-# ssl_cert_path = "./path/to/certfile"
 # Sets the server's SSL certificates.
 # https://www.meilisearch.com/docs/learn/configuration/instance_options#ssl-certificates-path
+# ssl_cert_path = "./path/to/certfile"

-# ssl_key_path = "./path/to/private-key"
 # Sets the server's SSL key files.
 # https://www.meilisearch.com/docs/learn/configuration/instance_options#ssl-key-path
+# ssl_key_path = "./path/to/private-key"

-# ssl_ocsp_path = "./path/to/ocsp-file"
 # Sets the server's OCSP file.
 # https://www.meilisearch.com/docs/learn/configuration/instance_options#ssl-ocsp-path
+# ssl_ocsp_path = "./path/to/ocsp-file"

-ssl_require_auth = false
 # Makes SSL authentication mandatory.
 # https://www.meilisearch.com/docs/learn/configuration/instance_options#ssl-require-auth
+ssl_require_auth = false

-ssl_resumption = false
 # Activates SSL session resumption.
 # https://www.meilisearch.com/docs/learn/configuration/instance_options#ssl-resumption
+ssl_resumption = false

-ssl_tickets = false
 # Activates SSL tickets.
 # https://www.meilisearch.com/docs/learn/configuration/instance_options#ssl-tickets
+ssl_tickets = false

 #############################
 ### Experimental features ###
 #############################

-experimental_enable_metrics = false
 # Experimental metrics feature. For more information, see: <https://github.com/meilisearch/meilisearch/discussions/3518>
 # Enables the Prometheus metrics on the `GET /metrics` endpoint.
+experimental_enable_metrics = false

-experimental_reduce_indexing_memory_usage = false
 # Experimental RAM reduction during indexing, do not use in production, see: <https://github.com/meilisearch/product/discussions/652>
+experimental_reduce_indexing_memory_usage = false
fuzzers/Cargo.toml (new file, 20 lines)

[package]
name = "fuzzers"
publish = false

version.workspace = true
authors.workspace = true
description.workspace = true
homepage.workspace = true
readme.workspace = true
edition.workspace = true
license.workspace = true

[dependencies]
arbitrary = { version = "1.3.0", features = ["derive"] }
clap = { version = "4.3.0", features = ["derive"] }
fastrand = "1.9.0"
milli = { path = "../milli" }
serde = { version = "1.0.160", features = ["derive"] }
serde_json = { version = "1.0.95", features = ["preserve_order"] }
tempfile = "3.5.0"
fuzzers/README.md (new file, 3 lines)

# Fuzzers

The purpose of this crate is to contains all the handmade "fuzzer" we may need.
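The fuzzer binary this crate ships (added below) is the one the new `fuzzer-indexing.yml` workflow runs; locally it can be launched the same way, with `cargo run --release --bin fuzz-indexing`, and the `--par` and `--path` flags defined in its `Opt` struct control how many fuzzing threads run in parallel and where the temporary indexes are created (ideally a ramdisk, as its doc comments suggest).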
fuzzers/src/bin/fuzz-indexing.rs (new file, 152 lines)

use std::num::NonZeroUsize;
use std::path::PathBuf;
use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
use std::time::Duration;

use arbitrary::{Arbitrary, Unstructured};
use clap::Parser;
use fuzzers::Operation;
use milli::heed::EnvOpenOptions;
use milli::update::{IndexDocuments, IndexDocumentsConfig, IndexerConfig};
use milli::Index;
use tempfile::TempDir;

#[derive(Debug, Arbitrary)]
struct Batch([Operation; 5]);

#[derive(Debug, Clone, Parser)]
struct Opt {
    /// The number of fuzzer to run in parallel.
    #[clap(long)]
    par: Option<NonZeroUsize>,
    // We need to put a lot of newlines in the following documentation or else everything gets collapsed on one line
    /// The path in which the databases will be created.
    /// Using a ramdisk is recommended.
    ///
    /// Linux:
    ///
    /// sudo mount -t tmpfs -o size=2g tmpfs ramdisk # to create it
    ///
    /// sudo umount ramdisk # to remove it
    ///
    /// MacOS:
    ///
    /// diskutil erasevolume HFS+ 'RAM Disk' `hdiutil attach -nobrowse -nomount ram://4194304 # create it
    ///
    /// hdiutil detach /dev/:the_disk
    #[clap(long)]
    path: Option<PathBuf>,
}

fn main() {
    let opt = Opt::parse();
    let progression: &'static AtomicUsize = Box::leak(Box::new(AtomicUsize::new(0)));
    let stop: &'static AtomicBool = Box::leak(Box::new(AtomicBool::new(false)));

    let par = opt.par.unwrap_or_else(|| std::thread::available_parallelism().unwrap()).get();
    let mut handles = Vec::with_capacity(par);

    for _ in 0..par {
        let opt = opt.clone();

        let handle = std::thread::spawn(move || {
            let mut options = EnvOpenOptions::new();
            options.map_size(1024 * 1024 * 1024 * 1024);
            let tempdir = match opt.path {
                Some(path) => TempDir::new_in(path).unwrap(),
                None => TempDir::new().unwrap(),
            };
            let index = Index::new(options, tempdir.path()).unwrap();
            let indexer_config = IndexerConfig::default();
            let index_documents_config = IndexDocumentsConfig::default();

            std::thread::scope(|s| {
                loop {
                    if stop.load(Ordering::Relaxed) {
                        return;
                    }
                    let v: Vec<u8> =
                        std::iter::repeat_with(|| fastrand::u8(..)).take(1000).collect();

                    let mut data = Unstructured::new(&v);
                    let batches = <[Batch; 5]>::arbitrary(&mut data).unwrap();
                    // will be used to display the error once a thread crashes
                    let dbg_input = format!("{:#?}", batches);

                    let handle = s.spawn(|| {
                        let mut wtxn = index.write_txn().unwrap();

                        for batch in batches {
                            let mut builder = IndexDocuments::new(
                                &mut wtxn,
                                &index,
                                &indexer_config,
                                index_documents_config.clone(),
                                |_| (),
                                || false,
                            )
                            .unwrap();

                            for op in batch.0 {
                                match op {
                                    Operation::AddDoc(doc) => {
                                        let documents =
                                            milli::documents::objects_from_json_value(doc.to_d());
                                        let documents =
                                            milli::documents::documents_batch_reader_from_objects(
                                                documents,
                                            );
                                        let (b, _added) = builder.add_documents(documents).unwrap();
                                        builder = b;
                                    }
                                    Operation::DeleteDoc(id) => {
                                        let (b, _removed) =
                                            builder.remove_documents(vec![id.to_s()]).unwrap();
                                        builder = b;
                                    }
                                }
                            }
                            builder.execute().unwrap();

                            // after executing a batch we check if the database is corrupted
                            let res = index.search(&wtxn).execute().unwrap();
                            index.documents(&wtxn, res.documents_ids).unwrap();
                            progression.fetch_add(1, Ordering::Relaxed);
                        }
                        wtxn.abort().unwrap();
                    });
                    if let err @ Err(_) = handle.join() {
                        stop.store(true, Ordering::Relaxed);
                        err.expect(&dbg_input);
                    }
                }
            });
        });
        handles.push(handle);
    }

    std::thread::spawn(|| {
        let mut last_value = 0;
        let start = std::time::Instant::now();
        loop {
            let total = progression.load(Ordering::Relaxed);
            let elapsed = start.elapsed().as_secs();
            if elapsed > 3600 {
                // after 1 hour, stop the fuzzer, success
                std::process::exit(0);
            }
            println!(
                "Has been running for {:?} seconds. Tested {} new values for a total of {}.",
                elapsed,
                total - last_value,
                total
            );
            last_value = total;
            std::thread::sleep(Duration::from_secs(1));
        }
    });

    for handle in handles {
        handle.join().unwrap();
    }
}
fuzzers/src/lib.rs (new file, 46 lines)

use arbitrary::Arbitrary;
use serde_json::{json, Value};

#[derive(Debug, Arbitrary)]
pub enum Document {
    One,
    Two,
    Three,
    Four,
    Five,
    Six,
}

impl Document {
    pub fn to_d(&self) -> Value {
        match self {
            Document::One => json!({ "id": 0, "doggo": "bernese" }),
            Document::Two => json!({ "id": 0, "doggo": "golden" }),
            Document::Three => json!({ "id": 0, "catto": "jorts" }),
            Document::Four => json!({ "id": 1, "doggo": "bernese" }),
            Document::Five => json!({ "id": 1, "doggo": "golden" }),
            Document::Six => json!({ "id": 1, "catto": "jorts" }),
        }
    }
}

#[derive(Debug, Arbitrary)]
pub enum DocId {
    Zero,
    One,
}

impl DocId {
    pub fn to_s(&self) -> String {
        match self {
            DocId::Zero => "0".to_string(),
            DocId::One => "1".to_string(),
        }
    }
}

#[derive(Debug, Arbitrary)]
pub enum Operation {
    AddDoc(Document),
    DeleteDoc(DocId),
}
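To make the role of these `Arbitrary` derives concrete, here is a minimal stand-alone sketch (not part of the diff) of decoding structured operations from raw bytes, mirroring what `fuzz-indexing.rs` does with `Unstructured`; it assumes the `fuzzers` and `arbitrary` crates above are available as dependencies.

```rust
use arbitrary::{Arbitrary, Unstructured};
use fuzzers::Operation;

fn main() {
    // Any byte source works; the fuzzer binary fills this buffer with `fastrand`.
    let raw: Vec<u8> = (0u8..64).collect();
    let mut data = Unstructured::new(&raw);

    // `#[derive(Arbitrary)]` lets structured operations be decoded from raw bytes,
    // just like `<[Batch; 5]>::arbitrary(&mut data)` in fuzz-indexing.rs.
    let ops = <[Operation; 5]>::arbitrary(&mut data).unwrap();
    for op in &ops {
        match op {
            Operation::AddDoc(doc) => println!("add {:?}", doc.to_d()),
            Operation::DeleteDoc(id) => println!("delete {}", id.to_s()),
        }
    }
}
```

Feeding the decoded operations into the milli indexer, as the binary above does, is what turns random bytes into meaningful add/delete document batches.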
Another file diff is suppressed here because it is too large (file name not shown in this capture).
@@ -160,7 +160,7 @@ impl BatchKind
 impl BatchKind {
     /// Returns a `ControlFlow::Break` if you must stop right now.
     /// The boolean tell you if an index has been created by the batched task.
-    /// To ease the writting of the code. `true` can be returned when you don't need to create an index
+    /// To ease the writing of the code. `true` can be returned when you don't need to create an index
     /// but false can't be returned if you needs to create an index.
     // TODO use an AutoBatchKind as input
     pub fn new(
@@ -214,7 +214,7 @@ impl BatchKind

     /// Returns a `ControlFlow::Break` if you must stop right now.
     /// The boolean tell you if an index has been created by the batched task.
-    /// To ease the writting of the code. `true` can be returned when you don't need to create an index
+    /// To ease the writing of the code. `true` can be returned when you don't need to create an index
     /// but false can't be returned if you needs to create an index.
     #[rustfmt::skip]
     fn accumulate(self, id: TaskId, kind: AutobatchKind, index_already_exists: bool, primary_key: Option<&str>) -> ControlFlow<BatchKind, BatchKind> {
@@ -321,9 +321,18 @@ impl BatchKind
                 })
             }
             (
-                this @ BatchKind::DocumentOperation { .. },
+                BatchKind::DocumentOperation { method, allow_index_creation, primary_key, mut operation_ids },
                 K::DocumentDeletion,
-            ) => Break(this),
+            ) => {
+                operation_ids.push(id);
+
+                Continue(BatchKind::DocumentOperation {
+                    method,
+                    allow_index_creation,
+                    primary_key,
+                    operation_ids,
+                })
+            }
             // but we can't autobatch documents if it's not the same kind
             // this match branch MUST be AFTER the previous one
             (
@@ -346,7 +355,35 @@ impl BatchKind
                 deletion_ids.push(id);
                 Continue(BatchKind::DocumentClear { ids: deletion_ids })
             }
-            // we can't autobatch a deletion and an import
+            // we can autobatch the deletion and import if the index already exists
+            (
+                BatchKind::DocumentDeletion { mut deletion_ids },
+                K::DocumentImport { method, allow_index_creation, primary_key }
+            ) if index_already_exists => {
+                deletion_ids.push(id);
+
+                Continue(BatchKind::DocumentOperation {
+                    method,
+                    allow_index_creation,
+                    primary_key,
+                    operation_ids: deletion_ids,
+                })
+            }
+            // we can autobatch the deletion and import if both can't create an index
+            (
+                BatchKind::DocumentDeletion { mut deletion_ids },
+                K::DocumentImport { method, allow_index_creation, primary_key }
+            ) if !allow_index_creation => {
+                deletion_ids.push(id);
+
+                Continue(BatchKind::DocumentOperation {
+                    method,
+                    allow_index_creation,
+                    primary_key,
+                    operation_ids: deletion_ids,
+                })
+            }
+            // we can't autobatch a deletion and an import if the index does not exists but would be created by an addition
             (
                 this @ BatchKind::DocumentDeletion { .. },
                 K::DocumentImport { .. }
@@ -648,36 +685,36 @@ mod tests
         debug_snapshot!(autobatch_from(false,None, [settings(false)]), @"Some((Settings { allow_index_creation: false, settings_ids: [0] }, false))");
         debug_snapshot!(autobatch_from(false,None, [settings(false), settings(false), settings(false)]), @"Some((Settings { allow_index_creation: false, settings_ids: [0, 1, 2] }, false))");

-        // We can't autobatch document addition with document deletion
+        // We can autobatch document addition with document deletion
-        debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, true, None), doc_del()]), @"Some((DocumentOperation { method: ReplaceDocuments, allow_index_creation: true, primary_key: None, operation_ids: [0] }, true))");
+        debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, true, None), doc_del()]), @"Some((DocumentOperation { method: ReplaceDocuments, allow_index_creation: true, primary_key: None, operation_ids: [0, 1] }, true))");
-        debug_snapshot!(autobatch_from(true, None, [doc_imp(UpdateDocuments, true, None), doc_del()]), @"Some((DocumentOperation { method: UpdateDocuments, allow_index_creation: true, primary_key: None, operation_ids: [0] }, true))");
+        debug_snapshot!(autobatch_from(true, None, [doc_imp(UpdateDocuments, true, None), doc_del()]), @"Some((DocumentOperation { method: UpdateDocuments, allow_index_creation: true, primary_key: None, operation_ids: [0, 1] }, true))");
-        debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, false, None), doc_del()]), @"Some((DocumentOperation { method: ReplaceDocuments, allow_index_creation: false, primary_key: None, operation_ids: [0] }, false))");
+        debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, false, None), doc_del()]), @"Some((DocumentOperation { method: ReplaceDocuments, allow_index_creation: false, primary_key: None, operation_ids: [0, 1] }, false))");
-        debug_snapshot!(autobatch_from(true, None, [doc_imp(UpdateDocuments, false, None), doc_del()]), @"Some((DocumentOperation { method: UpdateDocuments, allow_index_creation: false, primary_key: None, operation_ids: [0] }, false))");
+        debug_snapshot!(autobatch_from(true, None, [doc_imp(UpdateDocuments, false, None), doc_del()]), @"Some((DocumentOperation { method: UpdateDocuments, allow_index_creation: false, primary_key: None, operation_ids: [0, 1] }, false))");
-        debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, true, Some("catto")), doc_del()]), @r###"Some((DocumentOperation { method: ReplaceDocuments, allow_index_creation: true, primary_key: Some("catto"), operation_ids: [0] }, true))"###);
+        debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, true, Some("catto")), doc_del()]), @r###"Some((DocumentOperation { method: ReplaceDocuments, allow_index_creation: true, primary_key: Some("catto"), operation_ids: [0, 1] }, true))"###);
-        debug_snapshot!(autobatch_from(true, None, [doc_imp(UpdateDocuments, true, Some("catto")), doc_del()]), @r###"Some((DocumentOperation { method: UpdateDocuments, allow_index_creation: true, primary_key: Some("catto"), operation_ids: [0] }, true))"###);
+        debug_snapshot!(autobatch_from(true, None, [doc_imp(UpdateDocuments, true, Some("catto")), doc_del()]), @r###"Some((DocumentOperation { method: UpdateDocuments, allow_index_creation: true, primary_key: Some("catto"), operation_ids: [0, 1] }, true))"###);
-        debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, false, Some("catto")), doc_del()]), @r###"Some((DocumentOperation { method: ReplaceDocuments, allow_index_creation: false, primary_key: Some("catto"), operation_ids: [0] }, false))"###);
+        debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, false, Some("catto")), doc_del()]), @r###"Some((DocumentOperation { method: ReplaceDocuments, allow_index_creation: false, primary_key: Some("catto"), operation_ids: [0, 1] }, false))"###);
-        debug_snapshot!(autobatch_from(true, None, [doc_imp(UpdateDocuments, false, Some("catto")), doc_del()]), @r###"Some((DocumentOperation { method: UpdateDocuments, allow_index_creation: false, primary_key: Some("catto"), operation_ids: [0] }, false))"###);
+        debug_snapshot!(autobatch_from(true, None, [doc_imp(UpdateDocuments, false, Some("catto")), doc_del()]), @r###"Some((DocumentOperation { method: UpdateDocuments, allow_index_creation: false, primary_key: Some("catto"), operation_ids: [0, 1] }, false))"###);
-        debug_snapshot!(autobatch_from(false, None, [doc_imp(ReplaceDocuments, true, None), doc_del()]), @"Some((DocumentOperation { method: ReplaceDocuments, allow_index_creation: true, primary_key: None, operation_ids: [0] }, true))");
+        debug_snapshot!(autobatch_from(false, None, [doc_imp(ReplaceDocuments, true, None), doc_del()]), @"Some((DocumentOperation { method: ReplaceDocuments, allow_index_creation: true, primary_key: None, operation_ids: [0, 1] }, true))");
-        debug_snapshot!(autobatch_from(false, None, [doc_imp(UpdateDocuments, true, None), doc_del()]), @"Some((DocumentOperation { method: UpdateDocuments, allow_index_creation: true, primary_key: None, operation_ids: [0] }, true))");
+        debug_snapshot!(autobatch_from(false, None, [doc_imp(UpdateDocuments, true, None), doc_del()]), @"Some((DocumentOperation { method: UpdateDocuments, allow_index_creation: true, primary_key: None, operation_ids: [0, 1] }, true))");
-        debug_snapshot!(autobatch_from(false, None, [doc_imp(ReplaceDocuments, false, None), doc_del()]), @"Some((DocumentOperation { method: ReplaceDocuments, allow_index_creation: false, primary_key: None, operation_ids: [0] }, false))");
+        debug_snapshot!(autobatch_from(false, None, [doc_imp(ReplaceDocuments, false, None), doc_del()]), @"Some((DocumentOperation { method: ReplaceDocuments, allow_index_creation: false, primary_key: None, operation_ids: [0, 1] }, false))");
-        debug_snapshot!(autobatch_from(false, None, [doc_imp(UpdateDocuments, false, None), doc_del()]), @"Some((DocumentOperation { method: UpdateDocuments, allow_index_creation: false, primary_key: None, operation_ids: [0] }, false))");
+        debug_snapshot!(autobatch_from(false, None, [doc_imp(UpdateDocuments, false, None), doc_del()]), @"Some((DocumentOperation { method: UpdateDocuments, allow_index_creation: false, primary_key: None, operation_ids: [0, 1] }, false))");
-        debug_snapshot!(autobatch_from(false, None, [doc_imp(ReplaceDocuments, true, Some("catto")), doc_del()]), @r###"Some((DocumentOperation { method: ReplaceDocuments, allow_index_creation: true, primary_key: Some("catto"), operation_ids: [0] }, true))"###);
+        debug_snapshot!(autobatch_from(false, None, [doc_imp(ReplaceDocuments, true, Some("catto")), doc_del()]), @r###"Some((DocumentOperation { method: ReplaceDocuments, allow_index_creation: true, primary_key: Some("catto"), operation_ids: [0, 1] }, true))"###);
-        debug_snapshot!(autobatch_from(false, None, [doc_imp(UpdateDocuments, true, Some("catto")), doc_del()]), @r###"Some((DocumentOperation { method: UpdateDocuments, allow_index_creation: true, primary_key: Some("catto"), operation_ids: [0] }, true))"###);
+        debug_snapshot!(autobatch_from(false, None, [doc_imp(UpdateDocuments, true, Some("catto")), doc_del()]), @r###"Some((DocumentOperation { method: UpdateDocuments, allow_index_creation: true, primary_key: Some("catto"), operation_ids: [0, 1] }, true))"###);
-        debug_snapshot!(autobatch_from(false, None, [doc_imp(ReplaceDocuments, false, Some("catto")), doc_del()]), @r###"Some((DocumentOperation { method: ReplaceDocuments, allow_index_creation: false, primary_key: Some("catto"), operation_ids: [0] }, false))"###);
+        debug_snapshot!(autobatch_from(false, None, [doc_imp(ReplaceDocuments, false, Some("catto")), doc_del()]), @r###"Some((DocumentOperation { method: ReplaceDocuments, allow_index_creation: false, primary_key: Some("catto"), operation_ids: [0, 1] }, false))"###);
-        debug_snapshot!(autobatch_from(false, None, [doc_imp(UpdateDocuments, false, Some("catto")), doc_del()]), @r###"Some((DocumentOperation { method: UpdateDocuments, allow_index_creation: false, primary_key: Some("catto"), operation_ids: [0] }, false))"###);
+        debug_snapshot!(autobatch_from(false, None, [doc_imp(UpdateDocuments, false, Some("catto")), doc_del()]), @r###"Some((DocumentOperation { method: UpdateDocuments, allow_index_creation: false, primary_key: Some("catto"), operation_ids: [0, 1] }, false))"###);
-        // we also can't do the only way around
+        // And the other way around
-        debug_snapshot!(autobatch_from(true, None, [doc_del(), doc_imp(ReplaceDocuments, true, None)]), @"Some((DocumentDeletion { deletion_ids: [0] }, false))");
+        debug_snapshot!(autobatch_from(true, None, [doc_del(), doc_imp(ReplaceDocuments, true, None)]), @"Some((DocumentOperation { method: ReplaceDocuments, allow_index_creation: true, primary_key: None, operation_ids: [0, 1] }, false))");
-        debug_snapshot!(autobatch_from(true, None, [doc_del(), doc_imp(UpdateDocuments, true, None)]), @"Some((DocumentDeletion { deletion_ids: [0] }, false))");
+        debug_snapshot!(autobatch_from(true, None, [doc_del(), doc_imp(UpdateDocuments, true, None)]), @"Some((DocumentOperation { method: UpdateDocuments, allow_index_creation: true, primary_key: None, operation_ids: [0, 1] }, false))");
-        debug_snapshot!(autobatch_from(true, None, [doc_del(), doc_imp(ReplaceDocuments, false, None)]), @"Some((DocumentDeletion { deletion_ids: [0] }, false))");
+        debug_snapshot!(autobatch_from(true, None, [doc_del(), doc_imp(ReplaceDocuments, false, None)]), @"Some((DocumentOperation { method: ReplaceDocuments, allow_index_creation: false, primary_key: None, operation_ids: [0, 1] }, false))");
-        debug_snapshot!(autobatch_from(true, None, [doc_del(), doc_imp(UpdateDocuments, false, None)]), @"Some((DocumentDeletion { deletion_ids: [0] }, false))");
+        debug_snapshot!(autobatch_from(true, None, [doc_del(), doc_imp(UpdateDocuments, false, None)]), @"Some((DocumentOperation { method: UpdateDocuments, allow_index_creation: false, primary_key: None, operation_ids: [0, 1] }, false))");
-        debug_snapshot!(autobatch_from(true, None, [doc_del(), doc_imp(ReplaceDocuments, true, Some("catto"))]), @"Some((DocumentDeletion { deletion_ids: [0] }, false))");
|
debug_snapshot!(autobatch_from(true, None, [doc_del(), doc_imp(ReplaceDocuments, true, Some("catto"))]), @r###"Some((DocumentOperation { method: ReplaceDocuments, allow_index_creation: true, primary_key: Some("catto"), operation_ids: [0, 1] }, false))"###);
|
||||||
debug_snapshot!(autobatch_from(true, None, [doc_del(), doc_imp(UpdateDocuments, true, Some("catto"))]), @"Some((DocumentDeletion { deletion_ids: [0] }, false))");
|
debug_snapshot!(autobatch_from(true, None, [doc_del(), doc_imp(UpdateDocuments, true, Some("catto"))]), @r###"Some((DocumentOperation { method: UpdateDocuments, allow_index_creation: true, primary_key: Some("catto"), operation_ids: [0, 1] }, false))"###);
|
||||||
debug_snapshot!(autobatch_from(true, None, [doc_del(), doc_imp(ReplaceDocuments, false, Some("catto"))]), @"Some((DocumentDeletion { deletion_ids: [0] }, false))");
|
debug_snapshot!(autobatch_from(true, None, [doc_del(), doc_imp(ReplaceDocuments, false, Some("catto"))]), @r###"Some((DocumentOperation { method: ReplaceDocuments, allow_index_creation: false, primary_key: Some("catto"), operation_ids: [0, 1] }, false))"###);
|
||||||
debug_snapshot!(autobatch_from(true, None, [doc_del(), doc_imp(UpdateDocuments, false, Some("catto"))]), @"Some((DocumentDeletion { deletion_ids: [0] }, false))");
|
debug_snapshot!(autobatch_from(true, None, [doc_del(), doc_imp(UpdateDocuments, false, Some("catto"))]), @r###"Some((DocumentOperation { method: UpdateDocuments, allow_index_creation: false, primary_key: Some("catto"), operation_ids: [0, 1] }, false))"###);
|
||||||
debug_snapshot!(autobatch_from(false, None, [doc_del(), doc_imp(ReplaceDocuments, false, None)]), @"Some((DocumentDeletion { deletion_ids: [0] }, false))");
|
debug_snapshot!(autobatch_from(false, None, [doc_del(), doc_imp(ReplaceDocuments, false, None)]), @"Some((DocumentOperation { method: ReplaceDocuments, allow_index_creation: false, primary_key: None, operation_ids: [0, 1] }, false))");
|
||||||
debug_snapshot!(autobatch_from(false, None, [doc_del(), doc_imp(UpdateDocuments, false, None)]), @"Some((DocumentDeletion { deletion_ids: [0] }, false))");
|
debug_snapshot!(autobatch_from(false, None, [doc_del(), doc_imp(UpdateDocuments, false, None)]), @"Some((DocumentOperation { method: UpdateDocuments, allow_index_creation: false, primary_key: None, operation_ids: [0, 1] }, false))");
|
||||||
debug_snapshot!(autobatch_from(false, None, [doc_del(), doc_imp(ReplaceDocuments, false, Some("catto"))]), @"Some((DocumentDeletion { deletion_ids: [0] }, false))");
|
debug_snapshot!(autobatch_from(false, None, [doc_del(), doc_imp(ReplaceDocuments, false, Some("catto"))]), @r###"Some((DocumentOperation { method: ReplaceDocuments, allow_index_creation: false, primary_key: Some("catto"), operation_ids: [0, 1] }, false))"###);
|
||||||
debug_snapshot!(autobatch_from(false, None, [doc_del(), doc_imp(UpdateDocuments, false, Some("catto"))]), @"Some((DocumentDeletion { deletion_ids: [0] }, false))");
|
debug_snapshot!(autobatch_from(false, None, [doc_del(), doc_imp(UpdateDocuments, false, Some("catto"))]), @r###"Some((DocumentOperation { method: UpdateDocuments, allow_index_creation: false, primary_key: Some("catto"), operation_ids: [0, 1] }, false))"###);
|
||||||
}
|
}
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
|
|||||||
@@ -998,7 +998,7 @@ impl IndexScheduler {
}()
.unwrap_or_default();

-// The write transaction is directly owned and commited inside.
+// The write transaction is directly owned and committed inside.
match self.index_mapper.delete_index(wtxn, &index_uid) {
Ok(()) => (),
Err(Error::IndexNotFound(_)) if index_has_been_created => (),
@@ -90,8 +90,17 @@ pub enum IndexStatus {
pub struct IndexStats {
/// Number of documents in the index.
pub number_of_documents: u64,
-/// Size of the index' DB, in bytes.
+/// Size taken up by the index' DB, in bytes.
+///
+/// This includes the size taken by both the used and free pages of the DB, and as the free pages
+/// are not returned to the disk after a deletion, this number is typically larger than
+/// `used_database_size` that only includes the size of the used pages.
pub database_size: u64,
+/// Size taken by the used pages of the index' DB, in bytes.
+///
+/// As the DB backend does not return to the disk the pages that are not currently used by the DB,
+/// this value is typically smaller than `database_size`.
+pub used_database_size: u64,
/// Association of every field name with the number of times it occurs in the documents.
pub field_distribution: FieldDistribution,
/// Creation date of the index.
@@ -107,10 +116,10 @@ impl IndexStats {
///
/// - rtxn: a RO transaction for the index, obtained from `Index::read_txn()`.
pub fn new(index: &Index, rtxn: &RoTxn) -> Result<Self> {
-let database_size = index.on_disk_size()?;
Ok(IndexStats {
number_of_documents: index.number_of_documents(rtxn)?,
-database_size,
+database_size: index.on_disk_size()?,
+used_database_size: index.used_size()?,
field_distribution: index.field_distribution(rtxn)?,
created_at: index.created_at(rtxn)?,
updated_at: index.updated_at(rtxn)?,
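A minimal sketch of how the two sizes documented above can be read together; it is not part of the change, and the `stats` and `freeable_bytes` names are only illustrative, with `stats` assumed to come from `IndexStats::new(&index, &rtxn)?`:

// Sketch only: the free pages LMDB keeps around (reusable for future writes)
// are the gap between the on-disk size and the used-page size.
let freeable_bytes = stats.database_size - stats.used_database_size;
println!("{} bytes on disk, {} in use, {} reusable", stats.database_size, stats.used_database_size, freeable_bytes);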
@@ -31,7 +31,7 @@ mod uuid_codec;
pub type Result<T> = std::result::Result<T, Error>;
pub type TaskId = u32;

-use std::collections::HashMap;
+use std::collections::{BTreeMap, HashMap};
use std::ops::{Bound, RangeBounds};
use std::path::{Path, PathBuf};
use std::sync::atomic::AtomicBool;
@@ -573,10 +573,16 @@ impl IndexScheduler {
&self.index_mapper.indexer_config
}

+/// Return the real database size (i.e.: The size **with** the free pages)
pub fn size(&self) -> Result<u64> {
Ok(self.env.real_disk_size()?)
}

+/// Return the used database size (i.e.: The size **without** the free pages)
+pub fn used_size(&self) -> Result<u64> {
+Ok(self.env.non_free_pages_size()?)
+}

/// Return the index corresponding to the name.
///
/// * If the index wasn't opened before, the index will be opened.
@@ -756,6 +762,38 @@ impl IndexScheduler {
Ok(tasks)
}

+/// The returned structure contains:
+/// 1. The name of the property being observed can be `statuses`, `types`, or `indexes`.
+/// 2. The name of the specific data related to the property can be `enqueued` for the `statuses`, `settingsUpdate` for the `types`, or the name of the index for the `indexes`, for example.
+/// 3. The number of times the properties appeared.
+pub fn get_stats(&self) -> Result<BTreeMap<String, BTreeMap<String, u64>>> {
+let rtxn = self.read_txn()?;
+
+let mut res = BTreeMap::new();
+
+res.insert(
+"statuses".to_string(),
+enum_iterator::all::<Status>()
+.map(|s| Ok((s.to_string(), self.get_status(&rtxn, s)?.len())))
+.collect::<Result<BTreeMap<String, u64>>>()?,
+);
+res.insert(
+"types".to_string(),
+enum_iterator::all::<Kind>()
+.map(|s| Ok((s.to_string(), self.get_kind(&rtxn, s)?.len())))
+.collect::<Result<BTreeMap<String, u64>>>()?,
+);
+res.insert(
+"indexes".to_string(),
+self.index_tasks
+.iter(&rtxn)?
+.map(|res| Ok(res.map(|(name, bitmap)| (name.to_string(), bitmap.len()))?))
+.collect::<Result<BTreeMap<String, u64>>>()?,
+);
+
+Ok(res)
+}
+
/// Return true iff there is at least one task associated with this index
/// that is processing.
pub fn is_index_processing(&self, index: &str) -> Result<bool> {
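A small usage sketch of the new `get_stats` helper (not part of the diff). It assumes an `index_scheduler: IndexScheduler` in scope and just prints the nested counters, which is essentially what the metrics route below does with Prometheus gauges:

// Sketch: print every observed property, e.g. "statuses.enqueued = 2".
for (property, counters) in index_scheduler.get_stats()? {
    for (name, count) in counters {
        println!("{property}.{name} = {count}");
    }
}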
@@ -1747,7 +1785,7 @@ mod tests {
assert_eq!(task.kind.as_kind(), k);
}

-snapshot!(snapshot_index_scheduler(&index_scheduler), name: "everything_is_succesfully_registered");
+snapshot!(snapshot_index_scheduler(&index_scheduler), name: "everything_is_successfully_registered");
}

#[test]
@@ -2037,6 +2075,105 @@ mod tests {
snapshot!(snapshot_index_scheduler(&index_scheduler), name: "both_task_succeeded");
}

+#[test]
+fn document_addition_and_document_deletion() {
+let (index_scheduler, mut handle) = IndexScheduler::test(true, vec![]);
+
+let content = r#"[
+{ "id": 1, "doggo": "jean bob" },
+{ "id": 2, "catto": "jorts" },
+{ "id": 3, "doggo": "bork" }
+]"#;
+
+let (uuid, mut file) = index_scheduler.create_update_file_with_uuid(0).unwrap();
+let documents_count = read_json(content.as_bytes(), file.as_file_mut()).unwrap();
+file.persist().unwrap();
+index_scheduler
+.register(KindWithContent::DocumentAdditionOrUpdate {
+index_uid: S("doggos"),
+primary_key: Some(S("id")),
+method: ReplaceDocuments,
+content_file: uuid,
+documents_count,
+allow_index_creation: true,
+})
+.unwrap();
+snapshot!(snapshot_index_scheduler(&index_scheduler), name: "registered_the_first_task");
+index_scheduler
+.register(KindWithContent::DocumentDeletion {
+index_uid: S("doggos"),
+documents_ids: vec![S("1"), S("2")],
+})
+.unwrap();
+snapshot!(snapshot_index_scheduler(&index_scheduler), name: "registered_the_second_task");
+
+handle.advance_one_successful_batch(); // The addition AND deletion should've been batched together
+snapshot!(snapshot_index_scheduler(&index_scheduler), name: "after_processing_the_batch");
+
+let index = index_scheduler.index("doggos").unwrap();
+let rtxn = index.read_txn().unwrap();
+let field_ids_map = index.fields_ids_map(&rtxn).unwrap();
+let field_ids = field_ids_map.ids().collect::<Vec<_>>();
+let documents = index
+.all_documents(&rtxn)
+.unwrap()
+.map(|ret| obkv_to_json(&field_ids, &field_ids_map, ret.unwrap().1).unwrap())
+.collect::<Vec<_>>();
+snapshot!(serde_json::to_string_pretty(&documents).unwrap(), name: "documents");
+}
+
+#[test]
+fn document_deletion_and_document_addition() {
+let (index_scheduler, mut handle) = IndexScheduler::test(true, vec![]);
+index_scheduler
+.register(KindWithContent::DocumentDeletion {
+index_uid: S("doggos"),
+documents_ids: vec![S("1"), S("2")],
+})
+.unwrap();
+snapshot!(snapshot_index_scheduler(&index_scheduler), name: "registered_the_first_task");
+
+let content = r#"[
+{ "id": 1, "doggo": "jean bob" },
+{ "id": 2, "catto": "jorts" },
+{ "id": 3, "doggo": "bork" }
+]"#;
+
+let (uuid, mut file) = index_scheduler.create_update_file_with_uuid(0).unwrap();
+let documents_count = read_json(content.as_bytes(), file.as_file_mut()).unwrap();
+file.persist().unwrap();
+index_scheduler
+.register(KindWithContent::DocumentAdditionOrUpdate {
+index_uid: S("doggos"),
+primary_key: Some(S("id")),
+method: ReplaceDocuments,
+content_file: uuid,
+documents_count,
+allow_index_creation: true,
+})
+.unwrap();
+snapshot!(snapshot_index_scheduler(&index_scheduler), name: "registered_the_second_task");
+
+// The deletion should have failed because it can't create an index
+handle.advance_one_failed_batch();
+snapshot!(snapshot_index_scheduler(&index_scheduler), name: "after_failing_the_deletion");
+
+// The addition should work
+handle.advance_one_successful_batch();
+snapshot!(snapshot_index_scheduler(&index_scheduler), name: "after_last_successful_addition");
+
+let index = index_scheduler.index("doggos").unwrap();
+let rtxn = index.read_txn().unwrap();
+let field_ids_map = index.fields_ids_map(&rtxn).unwrap();
+let field_ids = field_ids_map.ids().collect::<Vec<_>>();
+let documents = index
+.all_documents(&rtxn)
+.unwrap()
+.map(|ret| obkv_to_json(&field_ids, &field_ids_map, ret.unwrap().1).unwrap())
+.collect::<Vec<_>>();
+snapshot!(serde_json::to_string_pretty(&documents).unwrap(), name: "documents");
+}
+
#[test]
fn do_not_batch_task_of_different_indexes() {
let (index_scheduler, mut handle) = IndexScheduler::test(true, vec![]);
@@ -0,0 +1,43 @@
---
source: index-scheduler/src/lib.rs
---
### Autobatching Enabled = true
### Processing Tasks:
[]
----------------------------------------------------------------------
### All Tasks:
0 {uid: 0, status: succeeded, details: { received_documents: 3, indexed_documents: Some(3) }, kind: DocumentAdditionOrUpdate { index_uid: "doggos", primary_key: Some("id"), method: ReplaceDocuments, content_file: 00000000-0000-0000-0000-000000000000, documents_count: 3, allow_index_creation: true }}
1 {uid: 1, status: succeeded, details: { received_document_ids: 2, deleted_documents: Some(2) }, kind: DocumentDeletion { index_uid: "doggos", documents_ids: ["1", "2"] }}
----------------------------------------------------------------------
### Status:
enqueued []
succeeded [0,1,]
----------------------------------------------------------------------
### Kind:
"documentAdditionOrUpdate" [0,]
"documentDeletion" [1,]
----------------------------------------------------------------------
### Index Tasks:
doggos [0,1,]
----------------------------------------------------------------------
### Index Mapper:
doggos: { number_of_documents: 1, field_distribution: {"doggo": 1, "id": 1} }

----------------------------------------------------------------------
### Canceled By:

----------------------------------------------------------------------
### Enqueued At:
[timestamp] [0,]
[timestamp] [1,]
----------------------------------------------------------------------
### Started At:
[timestamp] [0,1,]
----------------------------------------------------------------------
### Finished At:
[timestamp] [0,1,]
----------------------------------------------------------------------
### File Store:

----------------------------------------------------------------------

@@ -0,0 +1,9 @@
---
source: index-scheduler/src/lib.rs
---
[
{
"id": 3,
"doggo": "bork"
}
]

@@ -0,0 +1,37 @@
---
source: index-scheduler/src/lib.rs
---
### Autobatching Enabled = true
### Processing Tasks:
[]
----------------------------------------------------------------------
### All Tasks:
0 {uid: 0, status: enqueued, details: { received_documents: 3, indexed_documents: None }, kind: DocumentAdditionOrUpdate { index_uid: "doggos", primary_key: Some("id"), method: ReplaceDocuments, content_file: 00000000-0000-0000-0000-000000000000, documents_count: 3, allow_index_creation: true }}
----------------------------------------------------------------------
### Status:
enqueued [0,]
----------------------------------------------------------------------
### Kind:
"documentAdditionOrUpdate" [0,]
----------------------------------------------------------------------
### Index Tasks:
doggos [0,]
----------------------------------------------------------------------
### Index Mapper:

----------------------------------------------------------------------
### Canceled By:

----------------------------------------------------------------------
### Enqueued At:
[timestamp] [0,]
----------------------------------------------------------------------
### Started At:
----------------------------------------------------------------------
### Finished At:
----------------------------------------------------------------------
### File Store:
00000000-0000-0000-0000-000000000000

----------------------------------------------------------------------

@@ -0,0 +1,40 @@
---
source: index-scheduler/src/lib.rs
---
### Autobatching Enabled = true
### Processing Tasks:
[]
----------------------------------------------------------------------
### All Tasks:
0 {uid: 0, status: enqueued, details: { received_documents: 3, indexed_documents: None }, kind: DocumentAdditionOrUpdate { index_uid: "doggos", primary_key: Some("id"), method: ReplaceDocuments, content_file: 00000000-0000-0000-0000-000000000000, documents_count: 3, allow_index_creation: true }}
1 {uid: 1, status: enqueued, details: { received_document_ids: 2, deleted_documents: None }, kind: DocumentDeletion { index_uid: "doggos", documents_ids: ["1", "2"] }}
----------------------------------------------------------------------
### Status:
enqueued [0,1,]
----------------------------------------------------------------------
### Kind:
"documentAdditionOrUpdate" [0,]
"documentDeletion" [1,]
----------------------------------------------------------------------
### Index Tasks:
doggos [0,1,]
----------------------------------------------------------------------
### Index Mapper:

----------------------------------------------------------------------
### Canceled By:

----------------------------------------------------------------------
### Enqueued At:
[timestamp] [0,]
[timestamp] [1,]
----------------------------------------------------------------------
### Started At:
----------------------------------------------------------------------
### Finished At:
----------------------------------------------------------------------
### File Store:
00000000-0000-0000-0000-000000000000

----------------------------------------------------------------------

@@ -0,0 +1,43 @@
---
source: index-scheduler/src/lib.rs
---
### Autobatching Enabled = true
### Processing Tasks:
[]
----------------------------------------------------------------------
### All Tasks:
0 {uid: 0, status: failed, error: ResponseError { code: 200, message: "Index `doggos` not found.", error_code: "index_not_found", error_type: "invalid_request", error_link: "https://docs.meilisearch.com/errors#index_not_found" }, details: { received_document_ids: 2, deleted_documents: Some(0) }, kind: DocumentDeletion { index_uid: "doggos", documents_ids: ["1", "2"] }}
1 {uid: 1, status: enqueued, details: { received_documents: 3, indexed_documents: None }, kind: DocumentAdditionOrUpdate { index_uid: "doggos", primary_key: Some("id"), method: ReplaceDocuments, content_file: 00000000-0000-0000-0000-000000000000, documents_count: 3, allow_index_creation: true }}
----------------------------------------------------------------------
### Status:
enqueued [1,]
failed [0,]
----------------------------------------------------------------------
### Kind:
"documentAdditionOrUpdate" [1,]
"documentDeletion" [0,]
----------------------------------------------------------------------
### Index Tasks:
doggos [0,1,]
----------------------------------------------------------------------
### Index Mapper:

----------------------------------------------------------------------
### Canceled By:

----------------------------------------------------------------------
### Enqueued At:
[timestamp] [0,]
[timestamp] [1,]
----------------------------------------------------------------------
### Started At:
[timestamp] [0,]
----------------------------------------------------------------------
### Finished At:
[timestamp] [0,]
----------------------------------------------------------------------
### File Store:
00000000-0000-0000-0000-000000000000

----------------------------------------------------------------------

@@ -0,0 +1,46 @@
---
source: index-scheduler/src/lib.rs
---
### Autobatching Enabled = true
### Processing Tasks:
[]
----------------------------------------------------------------------
### All Tasks:
0 {uid: 0, status: failed, error: ResponseError { code: 200, message: "Index `doggos` not found.", error_code: "index_not_found", error_type: "invalid_request", error_link: "https://docs.meilisearch.com/errors#index_not_found" }, details: { received_document_ids: 2, deleted_documents: Some(0) }, kind: DocumentDeletion { index_uid: "doggos", documents_ids: ["1", "2"] }}
1 {uid: 1, status: succeeded, details: { received_documents: 3, indexed_documents: Some(3) }, kind: DocumentAdditionOrUpdate { index_uid: "doggos", primary_key: Some("id"), method: ReplaceDocuments, content_file: 00000000-0000-0000-0000-000000000000, documents_count: 3, allow_index_creation: true }}
----------------------------------------------------------------------
### Status:
enqueued []
succeeded [1,]
failed [0,]
----------------------------------------------------------------------
### Kind:
"documentAdditionOrUpdate" [1,]
"documentDeletion" [0,]
----------------------------------------------------------------------
### Index Tasks:
doggos [0,1,]
----------------------------------------------------------------------
### Index Mapper:
doggos: { number_of_documents: 3, field_distribution: {"catto": 1, "doggo": 2, "id": 3} }

----------------------------------------------------------------------
### Canceled By:

----------------------------------------------------------------------
### Enqueued At:
[timestamp] [0,]
[timestamp] [1,]
----------------------------------------------------------------------
### Started At:
[timestamp] [0,]
[timestamp] [1,]
----------------------------------------------------------------------
### Finished At:
[timestamp] [0,]
[timestamp] [1,]
----------------------------------------------------------------------
### File Store:

----------------------------------------------------------------------

@@ -0,0 +1,17 @@
---
source: index-scheduler/src/lib.rs
---
[
{
"id": 1,
"doggo": "jean bob"
},
{
"id": 2,
"catto": "jorts"
},
{
"id": 3,
"doggo": "bork"
}
]

@@ -0,0 +1,36 @@
---
source: index-scheduler/src/lib.rs
---
### Autobatching Enabled = true
### Processing Tasks:
[]
----------------------------------------------------------------------
### All Tasks:
0 {uid: 0, status: enqueued, details: { received_document_ids: 2, deleted_documents: None }, kind: DocumentDeletion { index_uid: "doggos", documents_ids: ["1", "2"] }}
----------------------------------------------------------------------
### Status:
enqueued [0,]
----------------------------------------------------------------------
### Kind:
"documentDeletion" [0,]
----------------------------------------------------------------------
### Index Tasks:
doggos [0,]
----------------------------------------------------------------------
### Index Mapper:

----------------------------------------------------------------------
### Canceled By:

----------------------------------------------------------------------
### Enqueued At:
[timestamp] [0,]
----------------------------------------------------------------------
### Started At:
----------------------------------------------------------------------
### Finished At:
----------------------------------------------------------------------
### File Store:

----------------------------------------------------------------------

@@ -0,0 +1,40 @@
---
source: index-scheduler/src/lib.rs
---
### Autobatching Enabled = true
### Processing Tasks:
[]
----------------------------------------------------------------------
### All Tasks:
0 {uid: 0, status: enqueued, details: { received_document_ids: 2, deleted_documents: None }, kind: DocumentDeletion { index_uid: "doggos", documents_ids: ["1", "2"] }}
1 {uid: 1, status: enqueued, details: { received_documents: 3, indexed_documents: None }, kind: DocumentAdditionOrUpdate { index_uid: "doggos", primary_key: Some("id"), method: ReplaceDocuments, content_file: 00000000-0000-0000-0000-000000000000, documents_count: 3, allow_index_creation: true }}
----------------------------------------------------------------------
### Status:
enqueued [0,1,]
----------------------------------------------------------------------
### Kind:
"documentAdditionOrUpdate" [1,]
"documentDeletion" [0,]
----------------------------------------------------------------------
### Index Tasks:
doggos [0,1,]
----------------------------------------------------------------------
### Index Mapper:

----------------------------------------------------------------------
### Canceled By:

----------------------------------------------------------------------
### Enqueued At:
[timestamp] [0,]
[timestamp] [1,]
----------------------------------------------------------------------
### Started At:
----------------------------------------------------------------------
### Finished At:
----------------------------------------------------------------------
### File Store:
00000000-0000-0000-0000-000000000000

----------------------------------------------------------------------
@@ -466,7 +466,7 @@ impl IndexScheduler {
}
}
Details::DocumentDeletionByFilter { deleted_documents, original_filter: _ } => {
-assert_eq!(kind.as_kind(), Kind::DocumentDeletionByFilter);
+assert_eq!(kind.as_kind(), Kind::DocumentDeletion);
let (index_uid, _) = if let KindWithContent::DocumentDeletionByFilter {
ref index_uid,
ref filter_expr,
index-stats/Cargo.toml (new file, 12 lines)
@@ -0,0 +1,12 @@
[package]
name = "index-stats"
description = "A small program that computes internal stats of a Meilisearch index"
version = "0.1.0"
edition = "2021"
publish = false

[dependencies]
anyhow = "1.0.71"
clap = { version = "4.3.5", features = ["derive"] }
milli = { path = "../milli" }
piechart = "1.0.0"

index-stats/src/main.rs (new file, 224 lines)
@@ -0,0 +1,224 @@
use std::cmp::Reverse;
use std::path::PathBuf;

use clap::Parser;
use milli::heed::{types::ByteSlice, EnvOpenOptions, PolyDatabase, RoTxn};
use milli::index::db_name::*;
use milli::index::Index;
use piechart::{Chart, Color, Data};

/// A small program that computes internal stats of a Meilisearch index.
#[derive(Parser, Debug)]
#[command(author, version, about, long_about = None)]
struct Args {
/// The path to the LMDB Meilisearch index database.
path: PathBuf,

/// The radius of the graphs
#[clap(long, default_value_t = 10)]
graph_radius: u16,

/// The aspect ratio of the graphs
#[clap(long, default_value_t = 6)]
graph_aspect_ratio: u16,
}

fn main() -> anyhow::Result<()> {
let Args { path, graph_radius, graph_aspect_ratio } = Args::parse();
let env = EnvOpenOptions::new().max_dbs(24).open(path)?;

// TODO not sure to keep that...
// if removed put the pub(crate) back in the Index struct
matches!(
Option::<Index>::None,
Some(Index {
env: _,
main: _,
word_docids: _,
exact_word_docids: _,
word_prefix_docids: _,
exact_word_prefix_docids: _,
word_pair_proximity_docids: _,
word_prefix_pair_proximity_docids: _,
prefix_word_pair_proximity_docids: _,
word_position_docids: _,
word_fid_docids: _,
field_id_word_count_docids: _,
word_prefix_position_docids: _,
word_prefix_fid_docids: _,
script_language_docids: _,
facet_id_exists_docids: _,
facet_id_is_null_docids: _,
facet_id_is_empty_docids: _,
facet_id_f64_docids: _,
facet_id_string_docids: _,
field_id_docid_facet_f64s: _,
field_id_docid_facet_strings: _,
documents: _,
})
);

let mut wtxn = env.write_txn()?;
let main = env.create_poly_database(&mut wtxn, Some(MAIN))?;
let word_docids = env.create_poly_database(&mut wtxn, Some(WORD_DOCIDS))?;
let exact_word_docids = env.create_poly_database(&mut wtxn, Some(EXACT_WORD_DOCIDS))?;
let word_prefix_docids = env.create_poly_database(&mut wtxn, Some(WORD_PREFIX_DOCIDS))?;
let exact_word_prefix_docids =
env.create_poly_database(&mut wtxn, Some(EXACT_WORD_PREFIX_DOCIDS))?;
let word_pair_proximity_docids =
env.create_poly_database(&mut wtxn, Some(WORD_PAIR_PROXIMITY_DOCIDS))?;
let script_language_docids =
env.create_poly_database(&mut wtxn, Some(SCRIPT_LANGUAGE_DOCIDS))?;
let word_prefix_pair_proximity_docids =
env.create_poly_database(&mut wtxn, Some(WORD_PREFIX_PAIR_PROXIMITY_DOCIDS))?;
let prefix_word_pair_proximity_docids =
env.create_poly_database(&mut wtxn, Some(PREFIX_WORD_PAIR_PROXIMITY_DOCIDS))?;
let word_position_docids = env.create_poly_database(&mut wtxn, Some(WORD_POSITION_DOCIDS))?;
let word_fid_docids = env.create_poly_database(&mut wtxn, Some(WORD_FIELD_ID_DOCIDS))?;
let field_id_word_count_docids =
env.create_poly_database(&mut wtxn, Some(FIELD_ID_WORD_COUNT_DOCIDS))?;
let word_prefix_position_docids =
env.create_poly_database(&mut wtxn, Some(WORD_PREFIX_POSITION_DOCIDS))?;
let word_prefix_fid_docids =
env.create_poly_database(&mut wtxn, Some(WORD_PREFIX_FIELD_ID_DOCIDS))?;
let facet_id_f64_docids = env.create_poly_database(&mut wtxn, Some(FACET_ID_F64_DOCIDS))?;
let facet_id_string_docids =
env.create_poly_database(&mut wtxn, Some(FACET_ID_STRING_DOCIDS))?;
let facet_id_exists_docids =
env.create_poly_database(&mut wtxn, Some(FACET_ID_EXISTS_DOCIDS))?;
let facet_id_is_null_docids =
env.create_poly_database(&mut wtxn, Some(FACET_ID_IS_NULL_DOCIDS))?;
let facet_id_is_empty_docids =
env.create_poly_database(&mut wtxn, Some(FACET_ID_IS_EMPTY_DOCIDS))?;
let field_id_docid_facet_f64s =
env.create_poly_database(&mut wtxn, Some(FIELD_ID_DOCID_FACET_F64S))?;
let field_id_docid_facet_strings =
env.create_poly_database(&mut wtxn, Some(FIELD_ID_DOCID_FACET_STRINGS))?;
let documents = env.create_poly_database(&mut wtxn, Some(DOCUMENTS))?;
wtxn.commit()?;

let list = [
(main, MAIN),
(word_docids, WORD_DOCIDS),
(exact_word_docids, EXACT_WORD_DOCIDS),
(word_prefix_docids, WORD_PREFIX_DOCIDS),
(exact_word_prefix_docids, EXACT_WORD_PREFIX_DOCIDS),
(word_pair_proximity_docids, WORD_PAIR_PROXIMITY_DOCIDS),
(script_language_docids, SCRIPT_LANGUAGE_DOCIDS),
(word_prefix_pair_proximity_docids, WORD_PREFIX_PAIR_PROXIMITY_DOCIDS),
(prefix_word_pair_proximity_docids, PREFIX_WORD_PAIR_PROXIMITY_DOCIDS),
(word_position_docids, WORD_POSITION_DOCIDS),
(word_fid_docids, WORD_FIELD_ID_DOCIDS),
(field_id_word_count_docids, FIELD_ID_WORD_COUNT_DOCIDS),
(word_prefix_position_docids, WORD_PREFIX_POSITION_DOCIDS),
(word_prefix_fid_docids, WORD_PREFIX_FIELD_ID_DOCIDS),
(facet_id_f64_docids, FACET_ID_F64_DOCIDS),
(facet_id_string_docids, FACET_ID_STRING_DOCIDS),
(facet_id_exists_docids, FACET_ID_EXISTS_DOCIDS),
(facet_id_is_null_docids, FACET_ID_IS_NULL_DOCIDS),
(facet_id_is_empty_docids, FACET_ID_IS_EMPTY_DOCIDS),
(field_id_docid_facet_f64s, FIELD_ID_DOCID_FACET_F64S),
(field_id_docid_facet_strings, FIELD_ID_DOCID_FACET_STRINGS),
(documents, DOCUMENTS),
];

let rtxn = env.read_txn()?;
let result: Result<Vec<_>, _> =
list.into_iter().map(|(db, name)| compute_stats(&rtxn, db).map(|s| (s, name))).collect();
let mut stats = result?;

println!("{:1$} Number of Entries", "", graph_radius as usize * 2);
stats.sort_by_key(|(s, _)| Reverse(s.number_of_entries));
let data = compute_graph_data(stats.iter().map(|(s, n)| (s.number_of_entries as f32, *n)));
Chart::new().radius(graph_radius).aspect_ratio(graph_aspect_ratio).draw(&data);
display_legend(&data);
print!("\r\n");

println!("{:1$} Size of Entries", "", graph_radius as usize * 2);
stats.sort_by_key(|(s, _)| Reverse(s.size_of_entries));
let data = compute_graph_data(stats.iter().map(|(s, n)| (s.size_of_entries as f32, *n)));
Chart::new().radius(graph_radius).aspect_ratio(graph_aspect_ratio).draw(&data);
display_legend(&data);
print!("\r\n");

println!("{:1$} Size of Data", "", graph_radius as usize * 2);
stats.sort_by_key(|(s, _)| Reverse(s.size_of_data));
let data = compute_graph_data(stats.iter().map(|(s, n)| (s.size_of_data as f32, *n)));
Chart::new().radius(graph_radius).aspect_ratio(graph_aspect_ratio).draw(&data);
display_legend(&data);
print!("\r\n");

println!("{:1$} Size of Keys", "", graph_radius as usize * 2);
stats.sort_by_key(|(s, _)| Reverse(s.size_of_keys));
let data = compute_graph_data(stats.iter().map(|(s, n)| (s.size_of_keys as f32, *n)));
Chart::new().radius(graph_radius).aspect_ratio(graph_aspect_ratio).draw(&data);
display_legend(&data);

Ok(())
}

fn display_legend(data: &[Data]) {
let total: f32 = data.iter().map(|d| d.value).sum();
for Data { label, value, color, fill } in data {
println!(
"{} {} {:.02}%",
color.unwrap().paint(fill.to_string()),
label,
value / total * 100.0
);
}
}

fn compute_graph_data<'a>(stats: impl IntoIterator<Item = (f32, &'a str)>) -> Vec<Data> {
let mut colors = [
Color::Red,
Color::Green,
Color::Yellow,
Color::Blue,
Color::Purple,
Color::Cyan,
Color::White,
]
.into_iter()
.cycle();

let mut characters = ['▴', '▵', '▾', '▿', '▪', '▫', '•', '◦'].into_iter().cycle();

stats
.into_iter()
.map(|(value, name)| Data {
label: (*name).into(),
value,
color: Some(colors.next().unwrap().into()),
fill: characters.next().unwrap(),
})
.collect()
}

#[derive(Debug)]
pub struct Stats {
pub number_of_entries: u64,
pub size_of_keys: u64,
pub size_of_data: u64,
pub size_of_entries: u64,
}

fn compute_stats(rtxn: &RoTxn, db: PolyDatabase) -> anyhow::Result<Stats> {
let mut number_of_entries = 0;
let mut size_of_keys = 0;
let mut size_of_data = 0;

for result in db.iter::<_, ByteSlice, ByteSlice>(rtxn)? {
let (key, data) = result?;
number_of_entries += 1;
size_of_keys += key.len() as u64;
size_of_data += data.len() as u64;
}

Ok(Stats {
number_of_entries,
size_of_keys,
size_of_data,
size_of_entries: size_of_keys + size_of_data,
})
}
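A small reuse sketch, not part of the new file: the `compute_graph_data` and `display_legend` helpers above are generic over any labelled values, so one could chart ad-hoc numbers the same way the tool charts LMDB stats. The values and labels here are made up for illustration:

// Sketch only: chart arbitrary labelled values with the same helpers.
let data = compute_graph_data([(120.0_f32, "word_docids"), (30.0, "documents"), (8.0, "main")]);
Chart::new().radius(10).aspect_ratio(6).draw(&data);
display_legend(&data);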
@@ -45,6 +45,11 @@ impl AuthController {
self.store.size()
}

+/// Return the used size of the `AuthController` database in bytes.
+pub fn used_size(&self) -> Result<u64> {
+self.store.used_size()
+}

pub fn create_key(&self, create_key: CreateApiKey) -> Result<Key> {
match self.store.get_api_key(create_key.uid)? {
Some(_) => Err(AuthControllerError::ApiKeyAlreadyExists(create_key.uid.to_string())),
@@ -75,6 +75,11 @@ impl HeedAuthStore {
Ok(self.env.real_disk_size()?)
}

+/// Return the number of bytes actually used in the database
+pub fn used_size(&self) -> Result<u64> {
+Ok(self.env.non_free_pages_size()?)
+}

pub fn set_drop_on_close(&mut self, v: bool) {
self.should_close_on_drop = v;
}
@@ -395,7 +395,6 @@ impl std::error::Error for ParseTaskStatusError {}
pub enum Kind {
DocumentAdditionOrUpdate,
DocumentDeletion,
-DocumentDeletionByFilter,
SettingsUpdate,
IndexCreation,
IndexDeletion,
@@ -412,7 +411,6 @@ impl Kind {
match self {
Kind::DocumentAdditionOrUpdate
| Kind::DocumentDeletion
-| Kind::DocumentDeletionByFilter
| Kind::SettingsUpdate
| Kind::IndexCreation
| Kind::IndexDeletion
@@ -430,7 +428,6 @@ impl Display for Kind {
match self {
Kind::DocumentAdditionOrUpdate => write!(f, "documentAdditionOrUpdate"),
Kind::DocumentDeletion => write!(f, "documentDeletion"),
-Kind::DocumentDeletionByFilter => write!(f, "documentDeletionByFilter"),
Kind::SettingsUpdate => write!(f, "settingsUpdate"),
Kind::IndexCreation => write!(f, "indexCreation"),
Kind::IndexDeletion => write!(f, "indexDeletion"),
@@ -4,20 +4,32 @@ use prometheus::{
register_int_gauge_vec, HistogramVec, IntCounterVec, IntGauge, IntGaugeVec,
};

-const HTTP_RESPONSE_TIME_CUSTOM_BUCKETS: &[f64; 14] = &[
-0.0005, 0.0008, 0.00085, 0.0009, 0.00095, 0.001, 0.00105, 0.0011, 0.00115, 0.0012, 0.0015,
-0.002, 0.003, 1.0,
-];
+/// Create evenly distributed buckets
+fn create_buckets() -> [f64; 29] {
+(0..10)
+.chain((10..100).step_by(10))
+.chain((100..=1000).step_by(100))
+.map(|i| i as f64 / 1000.)
+.collect::<Vec<_>>()
+.try_into()
+.unwrap()
+}
+
lazy_static! {
-pub static ref HTTP_REQUESTS_TOTAL: IntCounterVec = register_int_counter_vec!(
-opts!("http_requests_total", "HTTP requests total"),
+pub static ref HTTP_RESPONSE_TIME_CUSTOM_BUCKETS: [f64; 29] = create_buckets();
+pub static ref MEILISEARCH_HTTP_REQUESTS_TOTAL: IntCounterVec = register_int_counter_vec!(
+opts!("meilisearch_http_requests_total", "Meilisearch HTTP requests total"),
&["method", "path"]
)
.expect("Can't create a metric");
pub static ref MEILISEARCH_DB_SIZE_BYTES: IntGauge =
-register_int_gauge!(opts!("meilisearch_db_size_bytes", "Meilisearch Db Size In Bytes"))
+register_int_gauge!(opts!("meilisearch_db_size_bytes", "Meilisearch DB Size In Bytes"))
.expect("Can't create a metric");
+pub static ref MEILISEARCH_USED_DB_SIZE_BYTES: IntGauge = register_int_gauge!(opts!(
+"meilisearch_used_db_size_bytes",
+"Meilisearch Used DB Size In Bytes"
+))
+.expect("Can't create a metric");
pub static ref MEILISEARCH_INDEX_COUNT: IntGauge =
register_int_gauge!(opts!("meilisearch_index_count", "Meilisearch Index Count"))
.expect("Can't create a metric");
@@ -26,11 +38,16 @@ lazy_static! {
&["index"]
)
.expect("Can't create a metric");
-pub static ref HTTP_RESPONSE_TIME_SECONDS: HistogramVec = register_histogram_vec!(
+pub static ref MEILISEARCH_HTTP_RESPONSE_TIME_SECONDS: HistogramVec = register_histogram_vec!(
"http_response_time_seconds",
"HTTP response times",
&["method", "path"],
HTTP_RESPONSE_TIME_CUSTOM_BUCKETS.to_vec()
)
.expect("Can't create a metric");
+pub static ref MEILISEARCH_NB_TASKS: IntGaugeVec = register_int_gauge_vec!(
+opts!("meilisearch_nb_tasks", "Meilisearch Number of tasks"),
+&["kind", "value"]
+)
+.expect("Can't create a metric");
}
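A sketch of a test one could add next to `create_buckets` (not part of this change) to pin down the intended layout: 1 ms steps up to 10 ms, 10 ms steps up to 100 ms, then 100 ms steps up to 1 s, which is where the `[f64; 29]` length comes from.

#[cfg(test)]
mod bucket_tests {
    use super::create_buckets;

    #[test]
    fn buckets_are_evenly_distributed() {
        let buckets = create_buckets();
        // 10 values in 0..10, 9 in 10..100 (step 10), 10 in 100..=1000 (step 100).
        assert_eq!(buckets.len(), 29);
        assert_eq!(buckets[0], 0.0);
        assert_eq!(buckets[10], 0.01);
        assert_eq!(buckets[28], 1.0);
    }
}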
@@ -52,11 +52,11 @@ where
if is_registered_resource {
let request_method = req.method().to_string();
histogram_timer = Some(
-crate::metrics::HTTP_RESPONSE_TIME_SECONDS
+crate::metrics::MEILISEARCH_HTTP_RESPONSE_TIME_SECONDS
.with_label_values(&[&request_method, request_path])
.start_timer(),
);
-crate::metrics::HTTP_REQUESTS_TOTAL
+crate::metrics::MEILISEARCH_HTTP_REQUESTS_TOTAL
.with_label_values(&[&request_method, request_path])
.inc();
}
@@ -17,7 +17,7 @@ pub fn configure(config: &mut web::ServiceConfig) {

pub async fn get_metrics(
index_scheduler: GuardedData<ActionPolicy<{ actions::METRICS_GET }>, Data<IndexScheduler>>,
-auth_controller: GuardedData<ActionPolicy<{ actions::METRICS_GET }>, Data<AuthController>>,
+auth_controller: Data<AuthController>,
) -> Result<HttpResponse, ResponseError> {
let auth_filters = index_scheduler.filters();
if !auth_filters.all_indexes_authorized() {
@@ -28,10 +28,10 @@ pub async fn get_metrics(
return Err(error);
}

-let response =
-create_all_stats((*index_scheduler).clone(), (*auth_controller).clone(), auth_filters)?;
+let response = create_all_stats((*index_scheduler).clone(), auth_controller, auth_filters)?;

crate::metrics::MEILISEARCH_DB_SIZE_BYTES.set(response.database_size as i64);
+crate::metrics::MEILISEARCH_USED_DB_SIZE_BYTES.set(response.used_database_size as i64);
crate::metrics::MEILISEARCH_INDEX_COUNT.set(response.indexes.len() as i64);

for (index, value) in response.indexes.iter() {
@@ -40,6 +40,14 @@ pub async fn get_metrics(
.set(value.number_of_documents as i64);
}

+for (kind, value) in index_scheduler.get_stats()? {
+for (value, count) in value {
+crate::metrics::MEILISEARCH_NB_TASKS
+.with_label_values(&[&kind, &value])
+.set(count as i64);
+}
+}

let encoder = TextEncoder::new();
let mut buffer = vec![];
encoder.encode(&prometheus::gather(), &mut buffer).expect("Failed to encode metrics");
@@ -231,6 +231,8 @@ pub async fn running() -> HttpResponse {
|
|||||||
#[serde(rename_all = "camelCase")]
|
#[serde(rename_all = "camelCase")]
|
||||||
pub struct Stats {
|
pub struct Stats {
|
||||||
pub database_size: u64,
|
pub database_size: u64,
|
||||||
|
#[serde(skip)]
|
||||||
|
pub used_database_size: u64,
|
||||||
#[serde(serialize_with = "time::serde::rfc3339::option::serialize")]
|
#[serde(serialize_with = "time::serde::rfc3339::option::serialize")]
|
||||||
pub last_update: Option<OffsetDateTime>,
|
pub last_update: Option<OffsetDateTime>,
|
||||||
pub indexes: BTreeMap<String, indexes::IndexStats>,
|
pub indexes: BTreeMap<String, indexes::IndexStats>,
|
||||||
@@ -259,6 +261,7 @@ pub fn create_all_stats(
|
|||||||
let mut last_task: Option<OffsetDateTime> = None;
|
let mut last_task: Option<OffsetDateTime> = None;
|
||||||
let mut indexes = BTreeMap::new();
|
let mut indexes = BTreeMap::new();
|
||||||
let mut database_size = 0;
|
let mut database_size = 0;
|
||||||
|
let mut used_database_size = 0;
|
||||||
|
|
||||||
for index_uid in index_scheduler.index_names()? {
|
for index_uid in index_scheduler.index_names()? {
|
||||||
// Accumulate the size of all indexes, even unauthorized ones, so
|
// Accumulate the size of all indexes, even unauthorized ones, so
|
||||||
@@ -266,6 +269,7 @@ pub fn create_all_stats(
|
|||||||
// See <https://github.com/meilisearch/meilisearch/pull/3541#discussion_r1126747643> for context.
|
// See <https://github.com/meilisearch/meilisearch/pull/3541#discussion_r1126747643> for context.
|
||||||
let stats = index_scheduler.index_stats(&index_uid)?;
|
let stats = index_scheduler.index_stats(&index_uid)?;
|
||||||
database_size += stats.inner_stats.database_size;
|
database_size += stats.inner_stats.database_size;
|
||||||
|
used_database_size += stats.inner_stats.used_database_size;
|
||||||
|
|
||||||
if !filters.is_index_authorized(&index_uid) {
|
if !filters.is_index_authorized(&index_uid) {
|
||||||
continue;
|
continue;
|
||||||
@@ -278,10 +282,14 @@ pub fn create_all_stats(
|
|||||||
}
|
}
|
||||||
|
|
||||||
database_size += index_scheduler.size()?;
|
database_size += index_scheduler.size()?;
|
||||||
|
used_database_size += index_scheduler.used_size()?;
|
||||||
database_size += auth_controller.size()?;
|
database_size += auth_controller.size()?;
|
||||||
database_size += index_scheduler.compute_update_file_size()?;
|
used_database_size += auth_controller.used_size()?;
|
||||||
|
let update_file_size = index_scheduler.compute_update_file_size()?;
|
||||||
|
database_size += update_file_size;
|
||||||
|
used_database_size += update_file_size;
|
||||||
|
|
||||||
let stats = Stats { database_size, last_update: last_task, indexes };
|
let stats = Stats { database_size, used_database_size, last_update: last_task, indexes };
|
||||||
Ok(stats)
|
Ok(stats)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
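The `#[serde(skip)]` attribute added above is what keeps `used_database_size` out of the JSON returned by `/stats`, while the value stays available in Rust (it still feeds the metrics route). A minimal, standalone sketch of that behaviour, assuming only `serde` and `serde_json` (this is not Meilisearch code):

use serde::Serialize;

#[derive(Serialize)]
#[serde(rename_all = "camelCase")]
struct StatsSketch {
    database_size: u64,
    #[serde(skip)]
    used_database_size: u64,
}

fn main() {
    let stats = StatsSketch { database_size: 4096, used_database_size: 1024 };
    // Prints `{"databaseSize":4096}`: the skipped field is never exposed to clients.
    println!("{}", serde_json::to_string(&stats).unwrap());
}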
@@ -730,7 +730,7 @@ mod tests {
         let err = deserr_query_params::<TaskDeletionOrCancelationQuery>(params).unwrap_err();
         snapshot!(meili_snap::json_string!(err), @r###"
         {
-          "message": "Invalid value in parameter `types`: `createIndex` is not a valid task type. Available types are `documentAdditionOrUpdate`, `documentDeletion`, `documentDeletionByFilter`, `settingsUpdate`, `indexCreation`, `indexDeletion`, `indexUpdate`, `indexSwap`, `taskCancelation`, `taskDeletion`, `dumpCreation`, `snapshotCreation`.",
+          "message": "Invalid value in parameter `types`: `createIndex` is not a valid task type. Available types are `documentAdditionOrUpdate`, `documentDeletion`, `settingsUpdate`, `indexCreation`, `indexDeletion`, `indexUpdate`, `indexSwap`, `taskCancelation`, `taskDeletion`, `dumpCreation`, `snapshotCreation`.",
           "code": "invalid_task_types",
           "type": "invalid_request",
           "link": "https://docs.meilisearch.com/errors#invalid_task_types"
@@ -97,7 +97,7 @@ async fn task_bad_types() {
     snapshot!(code, @"400 Bad Request");
     snapshot!(json_string!(response), @r###"
     {
-      "message": "Invalid value in parameter `types`: `doggo` is not a valid task type. Available types are `documentAdditionOrUpdate`, `documentDeletion`, `documentDeletionByFilter`, `settingsUpdate`, `indexCreation`, `indexDeletion`, `indexUpdate`, `indexSwap`, `taskCancelation`, `taskDeletion`, `dumpCreation`, `snapshotCreation`.",
+      "message": "Invalid value in parameter `types`: `doggo` is not a valid task type. Available types are `documentAdditionOrUpdate`, `documentDeletion`, `settingsUpdate`, `indexCreation`, `indexDeletion`, `indexUpdate`, `indexSwap`, `taskCancelation`, `taskDeletion`, `dumpCreation`, `snapshotCreation`.",
       "code": "invalid_task_types",
       "type": "invalid_request",
       "link": "https://docs.meilisearch.com/errors#invalid_task_types"
@@ -108,7 +108,7 @@ async fn task_bad_types() {
     snapshot!(code, @"400 Bad Request");
     snapshot!(json_string!(response), @r###"
     {
-      "message": "Invalid value in parameter `types`: `doggo` is not a valid task type. Available types are `documentAdditionOrUpdate`, `documentDeletion`, `documentDeletionByFilter`, `settingsUpdate`, `indexCreation`, `indexDeletion`, `indexUpdate`, `indexSwap`, `taskCancelation`, `taskDeletion`, `dumpCreation`, `snapshotCreation`.",
+      "message": "Invalid value in parameter `types`: `doggo` is not a valid task type. Available types are `documentAdditionOrUpdate`, `documentDeletion`, `settingsUpdate`, `indexCreation`, `indexDeletion`, `indexUpdate`, `indexSwap`, `taskCancelation`, `taskDeletion`, `dumpCreation`, `snapshotCreation`.",
       "code": "invalid_task_types",
       "type": "invalid_request",
       "link": "https://docs.meilisearch.com/errors#invalid_task_types"
@@ -119,7 +119,7 @@ async fn task_bad_types() {
     snapshot!(code, @"400 Bad Request");
     snapshot!(json_string!(response), @r###"
     {
-      "message": "Invalid value in parameter `types`: `doggo` is not a valid task type. Available types are `documentAdditionOrUpdate`, `documentDeletion`, `documentDeletionByFilter`, `settingsUpdate`, `indexCreation`, `indexDeletion`, `indexUpdate`, `indexSwap`, `taskCancelation`, `taskDeletion`, `dumpCreation`, `snapshotCreation`.",
+      "message": "Invalid value in parameter `types`: `doggo` is not a valid task type. Available types are `documentAdditionOrUpdate`, `documentDeletion`, `settingsUpdate`, `indexCreation`, `indexDeletion`, `indexUpdate`, `indexSwap`, `taskCancelation`, `taskDeletion`, `dumpCreation`, `snapshotCreation`.",
       "code": "invalid_task_types",
       "type": "invalid_request",
       "link": "https://docs.meilisearch.com/errors#invalid_task_types"
@@ -75,9 +75,6 @@ maplit = "1.0.2"
 md5 = "0.7.0"
 rand = { version = "0.8.5", features = ["small_rng"] }
 
-[target.'cfg(fuzzing)'.dev-dependencies]
-fuzzcheck = "0.12.1"
-
 [features]
 all-tokenizations = ["charabia/default"]
 
@@ -111,7 +111,6 @@ pub enum Error {
     Io(#[from] io::Error),
 }
 
-#[cfg(test)]
 pub fn objects_from_json_value(json: serde_json::Value) -> Vec<crate::Object> {
     let documents = match json {
         object @ serde_json::Value::Object(_) => vec![object],
@@ -141,7 +140,6 @@ macro_rules! documents {
     }};
 }
 
-#[cfg(test)]
 pub fn documents_batch_reader_from_objects(
     objects: impl IntoIterator<Item = Object>,
 ) -> DocumentsBatchReader<std::io::Cursor<Vec<u8>>> {
@@ -106,22 +106,30 @@ impl<'a> ExternalDocumentsIds<'a> {
         map
     }
 
+    /// Return an fst of the combined hard and soft deleted ID.
+    pub fn to_fst<'b>(&'b self) -> fst::Result<Cow<'b, fst::Map<Cow<'a, [u8]>>>> {
+        if self.soft.is_empty() {
+            return Ok(Cow::Borrowed(&self.hard));
+        }
+        let union_op = self.hard.op().add(&self.soft).r#union();
+
+        let mut iter = union_op.into_stream();
+        let mut new_hard_builder = fst::MapBuilder::memory();
+        while let Some((external_id, marked_docids)) = iter.next() {
+            let value = indexed_last_value(marked_docids).unwrap();
+            if value != DELETED_ID {
+                new_hard_builder.insert(external_id, value)?;
+            }
+        }
+
+        drop(iter);
+
+        Ok(Cow::Owned(new_hard_builder.into_map().map_data(Cow::Owned)?))
+    }
+
     fn merge_soft_into_hard(&mut self) -> fst::Result<()> {
         if self.soft.len() >= self.hard.len() / 2 {
-            let union_op = self.hard.op().add(&self.soft).r#union();
-
-            let mut iter = union_op.into_stream();
-            let mut new_hard_builder = fst::MapBuilder::memory();
-            while let Some((external_id, marked_docids)) = iter.next() {
-                let value = indexed_last_value(marked_docids).unwrap();
-                if value != DELETED_ID {
-                    new_hard_builder.insert(external_id, value)?;
-                }
-            }
-
-            drop(iter);
-
-            self.hard = new_hard_builder.into_map().map_data(Cow::Owned)?;
+            self.hard = self.to_fst()?.into_owned();
             self.soft = fst::Map::default().map_data(Cow::Owned)?;
         }
 
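The new `to_fst` method above relies on a standard `fst` trick: stream the union of two maps and, for keys present in both, keep the value coming from the map that was added last. A self-contained sketch of that trick, with assumed function and variable names (this is not the crate's or Meilisearch's API):

use fst::{Map, MapBuilder, Streamer};

// Merge `hard` and `soft`; when a key exists in both, prefer the value from `soft`,
// because it was added to the union after `hard` and therefore has the higher index.
fn merge_maps(hard: &Map<Vec<u8>>, soft: &Map<Vec<u8>>) -> fst::Result<Map<Vec<u8>>> {
    let mut builder = MapBuilder::memory();
    let mut stream = hard.op().add(soft).union();
    while let Some((key, values)) = stream.next() {
        // Each value is tagged with the index of the map it came from.
        let freshest = values.iter().max_by_key(|v| v.index).unwrap();
        builder.insert(key, freshest.value)?;
    }
    Ok(builder.into_map())
}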
@@ -49,7 +49,7 @@ impl CboRoaringBitmapCodec {
         } else {
             // Otherwise, it means we used the classic RoaringBitmapCodec and
             // that the header takes threshold integers.
-            RoaringBitmap::deserialize_from(bytes)
+            RoaringBitmap::deserialize_unchecked_from(bytes)
         }
     }
 
@@ -69,7 +69,7 @@ impl CboRoaringBitmapCodec {
                 vec.push(integer);
             }
         } else {
-            roaring |= RoaringBitmap::deserialize_from(bytes.as_ref())?;
+            roaring |= RoaringBitmap::deserialize_unchecked_from(bytes.as_ref())?;
         }
     }
 
@@ -8,7 +8,7 @@ impl heed::BytesDecode<'_> for RoaringBitmapCodec {
     type DItem = RoaringBitmap;
 
     fn bytes_decode(bytes: &[u8]) -> Option<Self::DItem> {
-        RoaringBitmap::deserialize_from(bytes).ok()
+        RoaringBitmap::deserialize_unchecked_from(bytes).ok()
    }
 }
 
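Both codecs above switch from `deserialize_from` to `deserialize_unchecked_from`, which skips the validation pass over the serialized container; this is presumably acceptable here because the bytes being decoded were written by the same codec into the database. A minimal round-trip sketch of the two `roaring` calls (standalone example, not Meilisearch code):

use roaring::RoaringBitmap;

fn main() -> std::io::Result<()> {
    let bitmap: RoaringBitmap = (0..1_000).collect();
    let mut bytes = Vec::new();
    bitmap.serialize_into(&mut bytes)?;

    // Faster path: trusts the input and skips the layout checks done by `deserialize_from`.
    let decoded = RoaringBitmap::deserialize_unchecked_from(&bytes[..])?;
    assert_eq!(bitmap, decoded);
    Ok(())
}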
@@ -21,10 +21,9 @@ use crate::heed_codec::facet::{
 };
 use crate::heed_codec::{ScriptLanguageCodec, StrBEU16Codec, StrRefCodec};
 use crate::{
-    default_criteria, BEU32StrCodec, BoRoaringBitmapCodec, CboRoaringBitmapCodec, Criterion,
-    DocumentId, ExternalDocumentsIds, FacetDistribution, FieldDistribution, FieldId,
-    FieldIdWordCountCodec, GeoPoint, ObkvCodec, Result, RoaringBitmapCodec, RoaringBitmapLenCodec,
-    Search, U8StrStrCodec, BEU16, BEU32,
+    default_criteria, CboRoaringBitmapCodec, Criterion, DocumentId, ExternalDocumentsIds,
+    FacetDistribution, FieldDistribution, FieldId, FieldIdWordCountCodec, GeoPoint, ObkvCodec,
+    Result, RoaringBitmapCodec, RoaringBitmapLenCodec, Search, U8StrStrCodec, BEU16, BEU32,
 };
 
 pub const DEFAULT_MIN_WORD_LEN_ONE_TYPO: u8 = 5;
@@ -94,10 +93,10 @@ pub mod db_name {
 #[derive(Clone)]
 pub struct Index {
     /// The LMDB environment which this index is associated with.
-    pub(crate) env: heed::Env,
+    pub env: heed::Env,
 
     /// Contains many different types (e.g. the fields ids map).
-    pub(crate) main: PolyDatabase,
+    pub main: PolyDatabase,
 
     /// A word and all the documents ids containing the word.
     pub word_docids: Database<Str, RoaringBitmapCodec>,
@@ -111,9 +110,6 @@ pub struct Index {
     /// A prefix of word and all the documents ids containing this prefix, from attributes for which typos are not allowed.
     pub exact_word_prefix_docids: Database<Str, RoaringBitmapCodec>,
 
-    /// Maps a word and a document id (u32) to all the positions where the given word appears.
-    pub docid_word_positions: Database<BEU32StrCodec, BoRoaringBitmapCodec>,
-
     /// Maps the proximity between a pair of words with all the docids where this relation appears.
     pub word_pair_proximity_docids: Database<U8StrStrCodec, CboRoaringBitmapCodec>,
     /// Maps the proximity between a pair of word and prefix with all the docids where this relation appears.
@@ -154,7 +150,7 @@ pub struct Index {
     pub field_id_docid_facet_strings: Database<FieldDocIdFacetStringCodec, Str>,
 
     /// Maps the document id to the document as an obkv store.
-    pub(crate) documents: Database<OwnedType<BEU32>, ObkvCodec>,
+    pub documents: Database<OwnedType<BEU32>, ObkvCodec>,
 }
 
 impl Index {
@@ -177,7 +173,6 @@ impl Index {
        let word_prefix_docids = env.create_database(&mut wtxn, Some(WORD_PREFIX_DOCIDS))?;
        let exact_word_prefix_docids =
            env.create_database(&mut wtxn, Some(EXACT_WORD_PREFIX_DOCIDS))?;
-        let docid_word_positions = env.create_database(&mut wtxn, Some(DOCID_WORD_POSITIONS))?;
        let word_pair_proximity_docids =
            env.create_database(&mut wtxn, Some(WORD_PAIR_PROXIMITY_DOCIDS))?;
        let script_language_docids =
@@ -220,7 +215,6 @@ impl Index {
            exact_word_docids,
            word_prefix_docids,
            exact_word_prefix_docids,
-            docid_word_positions,
            word_pair_proximity_docids,
            script_language_docids,
            word_prefix_pair_proximity_docids,
@@ -1472,9 +1466,9 @@ pub(crate) mod tests {
 
        db_snap!(index, field_distribution,
            @r###"
-        age 1
-        id 2
-        name 2
+        age 1 |
+        id 2 |
+        name 2 |
        "###
        );
 
@@ -1492,9 +1486,9 @@ pub(crate) mod tests {
 
        db_snap!(index, field_distribution,
            @r###"
-        age 1
-        id 2
-        name 2
+        age 1 |
+        id 2 |
+        name 2 |
        "###
        );
 
@@ -1508,9 +1502,9 @@ pub(crate) mod tests {
 
        db_snap!(index, field_distribution,
            @r###"
-        has_dog 1
-        id 2
-        name 2
+        has_dog 1 |
+        id 2 |
+        name 2 |
        "###
        );
    }
@@ -5,52 +5,6 @@
 #[global_allocator]
 pub static ALLOC: mimalloc::MiMalloc = mimalloc::MiMalloc;
 
-// #[cfg(test)]
-// pub mod allocator {
-//     use std::alloc::{GlobalAlloc, System};
-//     use std::sync::atomic::{self, AtomicI64};
-
-//     #[global_allocator]
-//     pub static ALLOC: CountingAlloc = CountingAlloc {
-//         max_resident: AtomicI64::new(0),
-//         resident: AtomicI64::new(0),
-//         allocated: AtomicI64::new(0),
-//     };
-
-//     pub struct CountingAlloc {
-//         pub max_resident: AtomicI64,
-//         pub resident: AtomicI64,
-//         pub allocated: AtomicI64,
-//     }
-//     unsafe impl GlobalAlloc for CountingAlloc {
-//         unsafe fn alloc(&self, layout: std::alloc::Layout) -> *mut u8 {
-//             self.allocated.fetch_add(layout.size() as i64, atomic::Ordering::SeqCst);
-//             let old_resident =
-//                 self.resident.fetch_add(layout.size() as i64, atomic::Ordering::SeqCst);
-
-//             let resident = old_resident + layout.size() as i64;
-//             self.max_resident.fetch_max(resident, atomic::Ordering::SeqCst);
-
-//             // if layout.size() > 1_000_000 {
-//             //     eprintln!(
-//             //         "allocating {} with new resident size: {resident}",
-//             //         layout.size() / 1_000_000
-//             //     );
-//             //     // let trace = std::backtrace::Backtrace::capture();
-//             //     // let t = trace.to_string();
-//             //     // eprintln!("{t}");
-//             // }
-
-//             System.alloc(layout)
-//         }
-
-//         unsafe fn dealloc(&self, ptr: *mut u8, layout: std::alloc::Layout) {
-//             self.resident.fetch_sub(layout.size() as i64, atomic::Ordering::Relaxed);
-//             System.dealloc(ptr, layout)
-//         }
-//     }
-// }
-
 #[macro_use]
 pub mod documents;
 
@@ -26,7 +26,6 @@ pub fn apply_distinct_rule(
     ctx: &mut SearchContext,
     field_id: u16,
     candidates: &RoaringBitmap,
-    // TODO: add a universe here, such that the `excluded` are a subset of the universe?
 ) -> Result<DistinctOutput> {
     let mut excluded = RoaringBitmap::new();
     let mut remaining = RoaringBitmap::new();
@@ -206,7 +206,7 @@ impl State {
             )?;
             intersection &= &candidates;
             if !intersection.is_empty() {
-                // TODO: although not really worth it in terms of performance,
+                // Although not really worth it in terms of performance,
                 // if would be good to put this in cache for the sake of consistency
                 let candidates_with_exact_word_count = if count_all_positions < u8::MAX as usize {
                     ctx.index
@@ -32,7 +32,7 @@ impl<T> Interned<T> {
 #[derive(Clone)]
 pub struct DedupInterner<T> {
     stable_store: Vec<T>,
-    lookup: FxHashMap<T, Interned<T>>, // TODO: Arc
+    lookup: FxHashMap<T, Interned<T>>,
 }
 impl<T> Default for DedupInterner<T> {
     fn default() -> Self {
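`DedupInterner` above pairs a stable vector with a lookup map so that equal values are stored once and referred to through a small index afterwards. A minimal sketch of that pattern, with assumed names (this is not the milli implementation, which uses typed `Interned<T>` handles and `FxHashMap`):

use std::collections::HashMap;
use std::hash::Hash;

struct DedupInternerSketch<T> {
    stable_store: Vec<T>,
    lookup: HashMap<T, u16>,
}

impl<T: Clone + Eq + Hash> DedupInternerSketch<T> {
    fn new() -> Self {
        Self { stable_store: Vec::new(), lookup: HashMap::new() }
    }

    fn insert(&mut self, value: T) -> u16 {
        if let Some(&idx) = self.lookup.get(&value) {
            return idx; // already interned: reuse the existing index
        }
        let idx = self.stable_store.len() as u16;
        self.stable_store.push(value.clone());
        self.lookup.insert(value, idx);
        idx
    }

    fn get(&self, idx: u16) -> &T {
        &self.stable_store[idx as usize]
    }
}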
@@ -1,5 +1,4 @@
 /// Maximum number of tokens we consider in a single search.
-// TODO: Loic, find proper value here so we don't overflow the interner.
 pub const MAX_TOKEN_COUNT: usize = 1_000;
 
 /// Maximum number of prefixes that can be derived from a single word.
@@ -92,7 +92,7 @@ impl QueryGraph {
     /// which contains ngrams.
     pub fn from_query(
         ctx: &mut SearchContext,
-        // NOTE: the terms here must be consecutive
+        // The terms here must be consecutive
         terms: &[LocatedQueryTerm],
     ) -> Result<(QueryGraph, Vec<LocatedQueryTerm>)> {
         let mut new_located_query_terms = terms.to_vec();
@@ -103,7 +103,7 @@ impl QueryGraph {
         let root_node = 0;
         let end_node = 1;
 
-        // TODO: we could consider generalizing to 4,5,6,7,etc. ngrams
+        // We could consider generalizing to 4,5,6,7,etc. ngrams
         let (mut prev2, mut prev1, mut prev0): (Vec<u16>, Vec<u16>, Vec<u16>) =
             (vec![], vec![], vec![root_node]);
 
@@ -132,7 +132,6 @@ impl QueryTermSubset {
         if full_query_term.ngram_words.is_some() {
             return None;
         }
-        // TODO: included in subset
         if let Some(phrase) = full_query_term.zero_typo.phrase {
             self.zero_typo_subset.contains_phrase(phrase).then_some(ExactTerm::Phrase(phrase))
         } else if let Some(word) = full_query_term.zero_typo.exact {
@@ -182,7 +181,6 @@ impl QueryTermSubset {
         let word = match &self.zero_typo_subset {
             NTypoTermSubset::All => Some(use_prefix_db),
             NTypoTermSubset::Subset { words, phrases: _ } => {
-                // TODO: use a subset of prefix words instead
                 if words.contains(&use_prefix_db) {
                     Some(use_prefix_db)
                 } else {
@@ -204,7 +202,6 @@ impl QueryTermSubset {
         ctx: &mut SearchContext,
     ) -> Result<BTreeSet<Word>> {
         let mut result = BTreeSet::default();
-        // TODO: a compute_partially funtion
         if !self.one_typo_subset.is_empty() || !self.two_typo_subset.is_empty() {
             self.original.compute_fully_if_needed(ctx)?;
         }
@@ -300,7 +297,6 @@ impl QueryTermSubset {
         let mut result = BTreeSet::default();
 
         if !self.one_typo_subset.is_empty() {
-            // TODO: compute less than fully if possible
             self.original.compute_fully_if_needed(ctx)?;
         }
         let original = ctx.term_interner.get_mut(self.original);
@@ -79,7 +79,7 @@ pub fn located_query_terms_from_tokens(
             TokenKind::Separator(separator_kind) => {
                 // add penalty for hard separators
                 if let SeparatorKind::Hard = separator_kind {
-                    position = position.wrapping_add(1);
+                    position = position.wrapping_add(7);
                 }
 
                 phrase = 'phrase: {
@@ -139,7 +139,6 @@ pub fn number_of_typos_allowed<'ctx>(
     let min_len_one_typo = ctx.index.min_word_len_one_typo(ctx.txn)?;
     let min_len_two_typos = ctx.index.min_word_len_two_typos(ctx.txn)?;
 
-    // TODO: should `exact_words` also disable prefix search, ngrams, split words, or synonyms?
     let exact_words = ctx.index.exact_words(ctx.txn)?;
 
     Ok(Box::new(move |word: &str| {
@@ -250,8 +249,6 @@ impl PhraseBuilder {
         } else {
             // token has kind Word
             let word = ctx.word_interner.insert(token.lemma().to_string());
-            // TODO: in a phrase, check that every word exists
-            // otherwise return an empty term
             self.words.push(Some(word));
         }
     }
@@ -1,5 +1,48 @@
-#![allow(clippy::too_many_arguments)]
+/** Implements a "PathVisitor" which finds all paths of a certain cost
+from the START to END node of a ranking rule graph.
+
+A path is a list of conditions. A condition is the data associated with
+an edge, given by the ranking rule. Some edges don't have a condition associated
+with them, they are "unconditional". These kinds of edges are used to "skip" a node.
+
+The algorithm uses a depth-first search. It benefits from two main optimisations:
+- The list of all possible costs to go from any node to the END node is precomputed
+- The `DeadEndsCache` reduces the number of valid paths drastically, by making some edges
+  untraversable depending on what other edges were selected.
+
+These two optimisations are meant to avoid traversing edges that wouldn't lead
+to a valid path. In practically all cases, we avoid the exponential complexity
+that is inherent to depth-first search in a large ranking rule graph.
+
+The DeadEndsCache is a sort of prefix tree which associates a list of forbidden
+conditions to a list of traversed conditions.
+For example, the DeadEndsCache could say the following:
+- Immediately, from the start, the conditions `[a,b]` are forbidden
+- if we take the condition `c`, then the conditions `[e]` are also forbidden
+    - and if after that, we take `f`, then `[h,i]` are also forbidden
+        - etc.
+- if we take `g`, then `[f]` is also forbidden
+    - etc.
+- etc.
+As we traverse the graph, we also traverse the `DeadEndsCache` and keep a list of forbidden
+conditions in memory. Then, we know to avoid all edges which have a condition that is forbidden.
+
+When a path is found from START to END, we give it to the `visit` closure.
+This closure takes a mutable reference to the `DeadEndsCache`. This means that
+the caller can update this cache. Therefore, we must handle the case where the
+DeadEndsCache has been updated. This means potentially backtracking up to the point
+where the traversed conditions are all allowed by the new DeadEndsCache.
+
+The algorithm also implements the `TermsMatchingStrategy` logic.
+Some edges are augmented with a list of "nodes_to_skip". Skipping
+a node means "reaching this node through an unconditional edge". If we have
+already traversed (ie. not skipped) a node that is in this list, then we know that we
+can't traverse this edge. Otherwise, we traverse the edge but make sure to skip any
+future node that was present in the "nodes_to_skip" list.
+
+The caller can decide to stop the path finding algorithm
+by returning a `ControlFlow::Break` from the `visit` closure.
+*/
 use std::collections::{BTreeSet, VecDeque};
 use std::iter::FromIterator;
 use std::ops::ControlFlow;
@@ -12,30 +55,41 @@ use crate::search::new::query_graph::QueryNode;
 use crate::search::new::small_bitmap::SmallBitmap;
 use crate::Result;
 
+/// Closure which processes a path found by the `PathVisitor`
 type VisitFn<'f, G> = &'f mut dyn FnMut(
+    // the path as a list of conditions
     &[Interned<<G as RankingRuleGraphTrait>::Condition>],
     &mut RankingRuleGraph<G>,
+    // a mutable reference to the DeadEndsCache, to update it in case the given
+    // path doesn't resolve to any valid document ids
    &mut DeadEndsCache<<G as RankingRuleGraphTrait>::Condition>,
 ) -> Result<ControlFlow<()>>;
 
+/// A structure which is kept but not updated during the traversal of the graph.
+/// It can however be updated by the `visit` closure once a valid path has been found.
 struct VisitorContext<'a, G: RankingRuleGraphTrait> {
     graph: &'a mut RankingRuleGraph<G>,
     all_costs_from_node: &'a MappedInterner<QueryNode, Vec<u64>>,
     dead_ends_cache: &'a mut DeadEndsCache<G::Condition>,
 }
 
+/// The internal state of the traversal algorithm
 struct VisitorState<G: RankingRuleGraphTrait> {
+    /// Budget from the current node to the end node
     remaining_cost: u64,
+    /// Previously visited conditions, in order.
    path: Vec<Interned<G::Condition>>,
+    /// Previously visited conditions, as an efficient and compact set.
    visited_conditions: SmallBitmap<G::Condition>,
+    /// Previously visited (ie not skipped) nodes, as an efficient and compact set.
    visited_nodes: SmallBitmap<QueryNode>,
+    /// The conditions that cannot be visited anymore
    forbidden_conditions: SmallBitmap<G::Condition>,
-    forbidden_conditions_to_nodes: SmallBitmap<QueryNode>,
+    /// The nodes that cannot be visited anymore (they must be skipped)
+    nodes_to_skip: SmallBitmap<QueryNode>,
 }
 
+/// See module documentation
 pub struct PathVisitor<'a, G: RankingRuleGraphTrait> {
     state: VisitorState<G>,
     ctx: VisitorContext<'a, G>,
@@ -56,14 +110,13 @@ impl<'a, G: RankingRuleGraphTrait> PathVisitor<'a, G> {
                forbidden_conditions: SmallBitmap::for_interned_values_in(
                    &graph.conditions_interner,
                ),
-                forbidden_conditions_to_nodes: SmallBitmap::for_interned_values_in(
-                    &graph.query_graph.nodes,
-                ),
+                nodes_to_skip: SmallBitmap::for_interned_values_in(&graph.query_graph.nodes),
            },
            ctx: VisitorContext { graph, all_costs_from_node, dead_ends_cache },
        }
    }
 
+    /// See module documentation
    pub fn visit_paths(mut self, visit: VisitFn<G>) -> Result<()> {
        let _ =
            self.state.visit_node(self.ctx.graph.query_graph.root_node, visit, &mut self.ctx)?;
@@ -72,22 +125,31 @@ impl<'a, G: RankingRuleGraphTrait> PathVisitor<'a, G> {
 }
 
 impl<G: RankingRuleGraphTrait> VisitorState<G> {
+    /// Visits a node: traverse all its valid conditional and unconditional edges.
+    ///
+    /// Returns ControlFlow::Break if the path finding algorithm should stop.
+    /// Returns whether a valid path was found from this node otherwise.
    fn visit_node(
        &mut self,
        from_node: Interned<QueryNode>,
        visit: VisitFn<G>,
        ctx: &mut VisitorContext<G>,
    ) -> Result<ControlFlow<(), bool>> {
+        // any valid path will be found from this point
+        // if a valid path was found, then we know that the DeadEndsCache may have been updated,
+        // and we will need to do more work to potentially backtrack
        let mut any_valid = false;
 
        let edges = ctx.graph.edges_of_node.get(from_node).clone();
        for edge_idx in edges.iter() {
+            // could be none if the edge was deleted
            let Some(edge) = ctx.graph.edges_store.get(edge_idx).clone() else { continue };
 
            if self.remaining_cost < edge.cost as u64 {
                continue;
            }
            self.remaining_cost -= edge.cost as u64;
 
            let cf = match edge.condition {
                Some(condition) => self.visit_condition(
                    condition,
@@ -119,6 +181,10 @@ impl<G: RankingRuleGraphTrait> VisitorState<G> {
        Ok(ControlFlow::Continue(any_valid))
    }
 
+    /// Visits an unconditional edge.
+    ///
+    /// Returns ControlFlow::Break if the path finding algorithm should stop.
+    /// Returns whether a valid path was found from this node otherwise.
    fn visit_no_condition(
        &mut self,
        dest_node: Interned<QueryNode>,
@@ -134,20 +200,29 @@ impl<G: RankingRuleGraphTrait> VisitorState<G> {
        {
            return Ok(ControlFlow::Continue(false));
        }
+        // We've reached the END node!
        if dest_node == ctx.graph.query_graph.end_node {
            let control_flow = visit(&self.path, ctx.graph, ctx.dead_ends_cache)?;
+            // We could change the return type of the visit closure such that the caller
+            // tells us whether the dead ends cache was updated or not.
+            // Alternatively, maybe the DeadEndsCache should have a generation number
+            // to it, so that we don't need to play with these booleans at all.
            match control_flow {
                ControlFlow::Continue(_) => Ok(ControlFlow::Continue(true)),
                ControlFlow::Break(_) => Ok(ControlFlow::Break(())),
            }
        } else {
-            let old_fbct = self.forbidden_conditions_to_nodes.clone();
-            self.forbidden_conditions_to_nodes.union(edge_new_nodes_to_skip);
+            let old_fbct = self.nodes_to_skip.clone();
+            self.nodes_to_skip.union(edge_new_nodes_to_skip);
            let cf = self.visit_node(dest_node, visit, ctx)?;
-            self.forbidden_conditions_to_nodes = old_fbct;
+            self.nodes_to_skip = old_fbct;
            Ok(cf)
        }
    }
+    /// Visits a conditional edge.
+    ///
+    /// Returns ControlFlow::Break if the path finding algorithm should stop.
+    /// Returns whether a valid path was found from this node otherwise.
    fn visit_condition(
        &mut self,
        condition: Interned<G::Condition>,
@@ -159,7 +234,7 @@ impl<G: RankingRuleGraphTrait> VisitorState<G> {
        assert!(dest_node != ctx.graph.query_graph.end_node);
 
        if self.forbidden_conditions.contains(condition)
-            || self.forbidden_conditions_to_nodes.contains(dest_node)
+            || self.nodes_to_skip.contains(dest_node)
            || edge_new_nodes_to_skip.intersects(&self.visited_nodes)
        {
            return Ok(ControlFlow::Continue(false));
@@ -180,19 +255,19 @@ impl<G: RankingRuleGraphTrait> VisitorState<G> {
        self.visited_nodes.insert(dest_node);
        self.visited_conditions.insert(condition);
 
-        let old_fc = self.forbidden_conditions.clone();
+        let old_forb_cond = self.forbidden_conditions.clone();
        if let Some(next_forbidden) =
            ctx.dead_ends_cache.forbidden_conditions_after_prefix(self.path.iter().copied())
        {
            self.forbidden_conditions.union(&next_forbidden);
        }
-        let old_fctn = self.forbidden_conditions_to_nodes.clone();
-        self.forbidden_conditions_to_nodes.union(edge_new_nodes_to_skip);
+        let old_nodes_to_skip = self.nodes_to_skip.clone();
+        self.nodes_to_skip.union(edge_new_nodes_to_skip);
 
        let cf = self.visit_node(dest_node, visit, ctx)?;
 
-        self.forbidden_conditions_to_nodes = old_fctn;
-        self.forbidden_conditions = old_fc;
+        self.nodes_to_skip = old_nodes_to_skip;
+        self.forbidden_conditions = old_forb_cond;
 
        self.visited_conditions.remove(condition);
        self.visited_nodes.remove(dest_node);
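The module documentation added above describes a cost-bounded depth-first search pruned by a cache of forbidden conditions. A toy, self-contained sketch of that idea follows; the node, edge, and "forbidden set" shapes below are assumptions for illustration and much simpler than the real `PathVisitor` (no nodes-to-skip logic, no prefix-tree cache, no early stop):

use std::collections::HashSet;

struct Edge { to: usize, cost: u64, condition: Option<u32> }

// Collect every path from `node` to `end` whose conditional edges cost exactly `budget`,
// skipping any edge whose condition is in the `forbidden` set.
fn visit(
    node: usize,
    end: usize,
    budget: u64,
    graph: &[Vec<Edge>],
    forbidden: &HashSet<u32>,
    path: &mut Vec<u32>,
    out: &mut Vec<Vec<u32>>,
) {
    if node == end {
        if budget == 0 {
            out.push(path.clone()); // a path that consumed exactly the cost budget
        }
        return;
    }
    for edge in &graph[node] {
        if edge.cost > budget {
            continue; // prune: not enough budget left to take this edge
        }
        if let Some(c) = edge.condition {
            if forbidden.contains(&c) {
                continue; // prune: this condition is known to lead to a dead end
            }
            path.push(c);
            visit(edge.to, end, budget - edge.cost, graph, forbidden, path, out);
            path.pop();
        } else {
            visit(edge.to, end, budget - edge.cost, graph, forbidden, path, out);
        }
    }
}

fn main() {
    // start = 0, end = 2; two conditional edges of cost 1 each.
    let graph = vec![
        vec![Edge { to: 1, cost: 1, condition: Some(1) }],
        vec![Edge { to: 2, cost: 1, condition: Some(2) }],
        vec![],
    ];
    let mut paths = Vec::new();
    visit(0, 2, 2, &graph, &HashSet::new(), &mut Vec::new(), &mut paths);
    assert_eq!(paths, vec![vec![1, 2]]);
}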
@@ -9,12 +9,8 @@ use crate::search::new::query_term::LocatedQueryTermSubset;
 use crate::search::new::SearchContext;
 use crate::Result;
 
-// TODO: give a generation to each universe, then be able to get the exact
-// delta of docids between two universes of different generations!
-
 /// A cache storing the document ids associated with each ranking rule edge
 pub struct ConditionDocIdsCache<G: RankingRuleGraphTrait> {
-    // TOOD: should be a mapped interner?
     pub cache: FxHashMap<Interned<G::Condition>, ComputedCondition>,
     _phantom: PhantomData<G>,
 }
@@ -54,7 +50,7 @@ impl<G: RankingRuleGraphTrait> ConditionDocIdsCache<G> {
         }
         let condition = graph.conditions_interner.get_mut(interned_condition);
         let computed = G::resolve_condition(ctx, condition, universe)?;
-        // TODO: if computed.universe_len != universe.len() ?
+        // Can we put an assert here for computed.universe_len == universe.len() ?
         let _ = self.cache.insert(interned_condition, computed);
         let computed = &self.cache[&interned_condition];
         Ok(computed)
@@ -2,6 +2,7 @@ use crate::search::new::interner::{FixedSizeInterner, Interned};
 use crate::search::new::small_bitmap::SmallBitmap;
 
 pub struct DeadEndsCache<T> {
+    // conditions and next could/should be part of the same vector
     conditions: Vec<Interned<T>>,
     next: Vec<Self>,
     pub forbidden: SmallBitmap<T>,
@@ -27,7 +28,7 @@ impl<T> DeadEndsCache<T> {
         self.forbidden.insert(condition);
     }
 
-    pub fn advance(&mut self, condition: Interned<T>) -> Option<&mut Self> {
+    fn advance(&mut self, condition: Interned<T>) -> Option<&mut Self> {
         if let Some(idx) = self.conditions.iter().position(|c| *c == condition) {
             Some(&mut self.next[idx])
         } else {
@@ -69,14 +69,9 @@ impl RankingRuleGraphTrait for FidGraph {
 
         let mut edges = vec![];
         for fid in all_fields {
-            // TODO: We can improve performances and relevancy by storing
-            // the term subsets associated to each field ids fetched.
             edges.push((
-                fid as u32 * term.term_ids.len() as u32, // TODO improve the fid score i.e. fid^10.
-                conditions_interner.insert(FidCondition {
-                    term: term.clone(), // TODO remove this ugly clone
-                    fid,
-                }),
+                fid as u32 * term.term_ids.len() as u32,
+                conditions_interner.insert(FidCondition { term: term.clone(), fid }),
             ));
         }
 
@@ -94,14 +94,9 @@ impl RankingRuleGraphTrait for PositionGraph {
         let mut edges = vec![];
 
         for (cost, positions) in positions_for_costs {
-            // TODO: We can improve performances and relevancy by storing
-            // the term subsets associated to each position fetched
             edges.push((
                 cost,
-                conditions_interner.insert(PositionCondition {
-                    term: term.clone(), // TODO remove this ugly clone
-                    positions,
-                }),
+                conditions_interner.insert(PositionCondition { term: term.clone(), positions }),
             ));
         }
 
@@ -65,13 +65,6 @@ pub fn compute_docids(
         }
     }
 
-    // TODO: add safeguard in case the cartesian product is too large!
-    // even if we restrict the word derivations to a maximum of 100, the size of the
-    // caterisan product could reach a maximum of 10_000 derivations, which is way too much.
-    // Maybe prioritise the product of zero typo derivations, then the product of zero-typo/one-typo
-    // + one-typo/zero-typo, then one-typo/one-typo, then ... until an arbitrary limit has been
-    // reached
-
     for (left_phrase, left_word) in last_words_of_term_derivations(ctx, &left_term.term_subset)? {
         // Before computing the edges, check that the left word and left phrase
         // aren't disjoint with the universe, but only do it if there is more than
@@ -111,8 +104,6 @@ pub fn compute_docids(
     Ok(ComputedCondition {
         docids,
         universe_len: universe.len(),
-        // TODO: think about whether we want to reduce the subset,
-        // we probably should!
         start_term_subset: Some(left_term.clone()),
         end_term_subset: right_term.clone(),
     })
@@ -203,12 +194,7 @@ fn compute_non_prefix_edges(
             *docids |= new_docids;
         }
     }
-    if backward_proximity >= 1
-        // TODO: for now, we don't do any swapping when either term is a phrase
-        // but maybe we should. We'd need to look at the first/last word of the phrase
-        // depending on the context.
-        && left_phrase.is_none() && right_phrase.is_none()
-    {
+    if backward_proximity >= 1 && left_phrase.is_none() && right_phrase.is_none() {
         if let Some(new_docids) =
             ctx.get_db_word_pair_proximity_docids(word2, word1, backward_proximity)?
         {
@@ -33,8 +33,6 @@ pub fn compute_query_term_subset_docids(
     ctx: &mut SearchContext,
     term: &QueryTermSubset,
 ) -> Result<RoaringBitmap> {
-    // TODO Use the roaring::MultiOps trait
-
     let mut docids = RoaringBitmap::new();
     for word in term.all_single_words_except_prefix_db(ctx)? {
         if let Some(word_docids) = ctx.word_docids(word)? {
@@ -59,8 +57,6 @@ pub fn compute_query_term_subset_docids_within_field_id(
     term: &QueryTermSubset,
     fid: u16,
 ) -> Result<RoaringBitmap> {
-    // TODO Use the roaring::MultiOps trait
-
     let mut docids = RoaringBitmap::new();
     for word in term.all_single_words_except_prefix_db(ctx)? {
         if let Some(word_fid_docids) = ctx.get_db_word_fid_docids(word.interned(), fid)? {
@@ -71,7 +67,6 @@ pub fn compute_query_term_subset_docids_within_field_id(
     for phrase in term.all_phrases(ctx)? {
         // There may be false positives when resolving a phrase, so we're not
         // guaranteed that all of its words are within a single fid.
-        // TODO: fix this?
         if let Some(word) = phrase.words(ctx).iter().flatten().next() {
             if let Some(word_fid_docids) = ctx.get_db_word_fid_docids(*word, fid)? {
                 docids |= ctx.get_phrase_docids(phrase)? & word_fid_docids;
@@ -95,7 +90,6 @@ pub fn compute_query_term_subset_docids_within_position(
     term: &QueryTermSubset,
     position: u16,
 ) -> Result<RoaringBitmap> {
-    // TODO Use the roaring::MultiOps trait
     let mut docids = RoaringBitmap::new();
     for word in term.all_single_words_except_prefix_db(ctx)? {
         if let Some(word_position_docids) =
@@ -108,7 +102,6 @@ pub fn compute_query_term_subset_docids_within_position(
     for phrase in term.all_phrases(ctx)? {
         // It's difficult to know the expected position of the words in the phrase,
         // so instead we just check the first one.
-        // TODO: fix this?
         if let Some(word) = phrase.words(ctx).iter().flatten().next() {
             if let Some(word_position_docids) = ctx.get_db_word_position_docids(*word, position)? {
                 docids |= ctx.get_phrase_docids(phrase)? & word_position_docids
@@ -132,9 +125,6 @@ pub fn compute_query_graph_docids(
     q: &QueryGraph,
     universe: &RoaringBitmap,
 ) -> Result<RoaringBitmap> {
-    // TODO: there must be a faster way to compute this big
-    // roaring bitmap expression
-
     let mut nodes_resolved = SmallBitmap::for_interned_values_in(&q.nodes);
     let mut path_nodes_docids = q.nodes.map(|_| RoaringBitmap::new());
 
@@ -141,10 +141,6 @@ impl<'ctx, Query: RankingRuleQueryTrait> RankingRule<'ctx, Query> for Sort<'ctx,
         universe: &RoaringBitmap,
     ) -> Result<Option<RankingRuleOutput<Query>>> {
         let iter = self.iter.as_mut().unwrap();
-        // TODO: we should make use of the universe in the function below
-        // good for correctness, but ideally iter.next_bucket would take the current universe into account,
-        // as right now it could return buckets that don't intersect with the universe, meaning we will make many
-        // unneeded calls.
         if let Some(mut bucket) = iter.next_bucket()? {
             bucket.candidates &= universe;
             Ok(Some(bucket))
@@ -527,7 +527,7 @@ fn test_distinct_all_candidates() {
     let SearchResult { documents_ids, candidates, .. } = s.execute().unwrap();
     let candidates = candidates.iter().collect::<Vec<_>>();
     insta::assert_snapshot!(format!("{documents_ids:?}"), @"[14, 26, 4, 7, 17, 23, 1, 19, 25, 8, 20, 24]");
-    // TODO: this is incorrect!
+    // This is incorrect, but unfortunately impossible to do better efficiently.
     insta::assert_snapshot!(format!("{candidates:?}"), @"[1, 4, 7, 8, 14, 17, 19, 20, 23, 24, 25, 26]");
 }
 
@@ -122,11 +122,11 @@ fn create_edge_cases_index() -> TempIndex {
             sta stb stc ste stf stg sth sti stj stk stl stm stn sto stp stq str stst stt stu stv stw stx sty stz
             "
         },
         // The next 5 documents lay out a trap with the split word, phrase search, or synonym `sun flower`.
         // If the search query is "sunflower", the split word "Sun Flower" will match some documents.
         // If the query is `sunflower wilting`, then we should make sure that
-        // the sprximity condition `flower wilting: sprx N` also comes with the condition
-        // `sun wilting: sprx N+1`. TODO: this is not the exact condition we use for now.
+        // the proximity condition `flower wilting: sprx N` also comes with the condition
+        // `sun wilting: sprx N+1`, but this is not the exact condition we use for now.
         // We only check that the phrase `sun flower` exists and `flower wilting: sprx N`, which
         // is better than nothing but not the best.
         {
@@ -139,7 +139,7 @@ fn create_edge_cases_index() -> TempIndex {
         },
         {
             "id": 3,
             // This document matches the query `sunflower wilting`, but the sprximity condition
             // between `sunflower` and `wilting` cannot be through the split-word `Sun Flower`
             // which would reduce to only `flower` and `wilting` being in sprximity.
             "text": "A flower wilting under the sun, unlike a sunflower"
@@ -299,7 +299,7 @@ fn test_proximity_split_word() {
     let SearchResult { documents_ids, .. } = s.execute().unwrap();
     insta::assert_snapshot!(format!("{documents_ids:?}"), @"[2, 4, 5, 1, 3]");
     let texts = collect_field_values(&index, &txn, "text", &documents_ids);
-    // TODO: "2" and "4" should be swapped ideally
+    // "2" and "4" should be swapped ideally
     insta::assert_debug_snapshot!(texts, @r###"
     [
         "\"Sun Flower sounds like the title of a painting, maybe about a flower wilting under the heat.\"",
@@ -316,7 +316,7 @@ fn test_proximity_split_word() {
     let SearchResult { documents_ids, .. } = s.execute().unwrap();
     insta::assert_snapshot!(format!("{documents_ids:?}"), @"[2, 4, 1]");
     let texts = collect_field_values(&index, &txn, "text", &documents_ids);
-    // TODO: "2" and "4" should be swapped ideally
+    // "2" and "4" should be swapped ideally
     insta::assert_debug_snapshot!(texts, @r###"
     [
         "\"Sun Flower sounds like the title of a painting, maybe about a flower wilting under the heat.\"",
@@ -341,7 +341,7 @@ fn test_proximity_split_word() {
     let SearchResult { documents_ids, .. } = s.execute().unwrap();
     insta::assert_snapshot!(format!("{documents_ids:?}"), @"[2, 4, 1]");
     let texts = collect_field_values(&index, &txn, "text", &documents_ids);
-    // TODO: "2" and "4" should be swapped ideally
+    // "2" and "4" should be swapped ideally
     insta::assert_debug_snapshot!(texts, @r###"
     [
         "\"Sun Flower sounds like the title of a painting, maybe about a flower wilting under the heat.\"",
@@ -2,9 +2,8 @@
|
|||||||
This module tests the interactions between the proximity and typo ranking rules.
|
This module tests the interactions between the proximity and typo ranking rules.
|
||||||
|
|
||||||
The proximity ranking rule should transform the query graph such that it
|
The proximity ranking rule should transform the query graph such that it
|
||||||
only contains the word pairs that it used to compute its bucket.
|
only contains the word pairs that it used to compute its bucket, but this is not currently
|
||||||
|
implemented.
|
||||||
TODO: This is not currently implemented.
|
|
||||||
*/
|
*/
|
||||||
|
|
||||||
use crate::index::tests::TempIndex;
|
use crate::index::tests::TempIndex;
|
||||||
@@ -64,7 +63,7 @@ fn test_trap_basic() {
|
|||||||
let SearchResult { documents_ids, .. } = s.execute().unwrap();
|
let SearchResult { documents_ids, .. } = s.execute().unwrap();
|
||||||
insta::assert_snapshot!(format!("{documents_ids:?}"), @"[0, 1]");
|
insta::assert_snapshot!(format!("{documents_ids:?}"), @"[0, 1]");
|
||||||
let texts = collect_field_values(&index, &txn, "text", &documents_ids);
|
let texts = collect_field_values(&index, &txn, "text", &documents_ids);
|
||||||
// TODO: this is incorrect, 1 should come before 0
|
// This is incorrect, 1 should come before 0
|
||||||
insta::assert_debug_snapshot!(texts, @r###"
|
insta::assert_debug_snapshot!(texts, @r###"
|
||||||
[
|
[
|
||||||
"\"summer. holiday. sommer holidty\"",
|
"\"summer. holiday. sommer holidty\"",
|
||||||
|
|||||||
@@ -571,8 +571,8 @@ fn test_typo_synonyms() {
     s.terms_matching_strategy(TermsMatchingStrategy::All);
     s.query("the fast brownish fox jumps over the lackadaisical dog");
 
-    // TODO: is this correct? interaction of ngrams + synonyms means that the
-    // multi-word synonyms end up having a typo cost. This is probably not what we want.
+    // The interaction of ngrams + synonyms means that the multi-word synonyms end up having a typo cost.
+    // This is probably not what we want.
     let SearchResult { documents_ids, .. } = s.execute().unwrap();
     insta::assert_snapshot!(format!("{documents_ids:?}"), @"[21, 0, 22]");
     let texts = collect_field_values(&index, &txn, "text", &documents_ids);
@@ -89,7 +89,6 @@ Create a snapshot test of the given database.
 - `exact_word_docids`
 - `word_prefix_docids`
 - `exact_word_prefix_docids`
-- `docid_word_positions`
 - `word_pair_proximity_docids`
 - `word_prefix_pair_proximity_docids`
 - `word_position_docids`
@@ -217,11 +216,6 @@ pub fn snap_exact_word_prefix_docids(index: &Index) -> String {
         &format!("{s:<16} {}", display_bitmap(&b))
     })
 }
-pub fn snap_docid_word_positions(index: &Index) -> String {
-    make_db_snap_from_iter!(index, docid_word_positions, |((idx, s), b)| {
-        &format!("{idx:<6} {s:<16} {}", display_bitmap(&b))
-    })
-}
 pub fn snap_word_pair_proximity_docids(index: &Index) -> String {
     make_db_snap_from_iter!(index, word_pair_proximity_docids, |((proximity, word1, word2), b)| {
         &format!("{proximity:<2} {word1:<16} {word2:<16} {}", display_bitmap(&b))
@@ -324,7 +318,7 @@ pub fn snap_field_distributions(index: &Index) -> String {
     let rtxn = index.read_txn().unwrap();
     let mut snap = String::new();
     for (field, count) in index.field_distribution(&rtxn).unwrap() {
-        writeln!(&mut snap, "{field:<16} {count:<6}").unwrap();
+        writeln!(&mut snap, "{field:<16} {count:<6} |").unwrap();
     }
     snap
 }
@@ -334,7 +328,7 @@ pub fn snap_fields_ids_map(index: &Index) -> String {
     let mut snap = String::new();
     for field_id in fields_ids_map.ids() {
         let name = fields_ids_map.name(field_id).unwrap();
-        writeln!(&mut snap, "{field_id:<3} {name:<16}").unwrap();
+        writeln!(&mut snap, "{field_id:<3} {name:<16} |").unwrap();
     }
     snap
 }
@@ -477,9 +471,6 @@ macro_rules! full_snap_of_db {
     ($index:ident, exact_word_prefix_docids) => {{
         $crate::snapshot_tests::snap_exact_word_prefix_docids(&$index)
     }};
-    ($index:ident, docid_word_positions) => {{
-        $crate::snapshot_tests::snap_docid_word_positions(&$index)
-    }};
     ($index:ident, word_pair_proximity_docids) => {{
         $crate::snapshot_tests::snap_word_pair_proximity_docids(&$index)
     }};
@@ -1,7 +1,7 @@
 ---
 source: milli/src/index.rs
 ---
-age 1
-id 2
-name 2
+age 1 |
+id 2 |
+name 2 |
 
@@ -1,7 +1,7 @@
 ---
 source: milli/src/index.rs
 ---
-age 1
-id 2
-name 2
+age 1 |
+id 2 |
+name 2 |
 
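The two `writeln!` changes above append a trailing " |" to every snapshot row, which is exactly what the two `.snap` hunks record. A minimal illustration of the new row format, assuming nothing beyond the standard library (the field names and counts are made up):

use std::fmt::Write;

fn main() {
    // `{:<16}` / `{:<6}` left-pad the columns with spaces; the trailing " |"
    // makes that padding visible (and stable) in the snapshot file.
    let mut snap = String::new();
    for (field, count) in [("age", 1u64), ("id", 2), ("name", 2)] {
        writeln!(&mut snap, "{field:<16} {count:<6} |").unwrap();
    }
    assert!(snap.lines().all(|line| line.ends_with('|')));
    print!("{snap}");
}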
@@ -23,7 +23,6 @@ impl<'t, 'u, 'i> ClearDocuments<'t, 'u, 'i> {
             exact_word_docids,
             word_prefix_docids,
             exact_word_prefix_docids,
-            docid_word_positions,
             word_pair_proximity_docids,
             word_prefix_pair_proximity_docids,
             prefix_word_pair_proximity_docids,
@@ -80,7 +79,6 @@ impl<'t, 'u, 'i> ClearDocuments<'t, 'u, 'i> {
         exact_word_docids.clear(self.wtxn)?;
         word_prefix_docids.clear(self.wtxn)?;
         exact_word_prefix_docids.clear(self.wtxn)?;
-        docid_word_positions.clear(self.wtxn)?;
         word_pair_proximity_docids.clear(self.wtxn)?;
         word_prefix_pair_proximity_docids.clear(self.wtxn)?;
         prefix_word_pair_proximity_docids.clear(self.wtxn)?;
@@ -141,7 +139,6 @@ mod tests {
 
         assert!(index.word_docids.is_empty(&rtxn).unwrap());
         assert!(index.word_prefix_docids.is_empty(&rtxn).unwrap());
-        assert!(index.docid_word_positions.is_empty(&rtxn).unwrap());
         assert!(index.word_pair_proximity_docids.is_empty(&rtxn).unwrap());
         assert!(index.field_id_word_count_docids.is_empty(&rtxn).unwrap());
         assert!(index.word_prefix_pair_proximity_docids.is_empty(&rtxn).unwrap());
@@ -1,5 +1,5 @@
 use std::collections::btree_map::Entry;
-use std::collections::{HashMap, HashSet};
+use std::collections::{BTreeSet, HashMap, HashSet};
 
 use fst::IntoStreamer;
 use heed::types::{ByteSlice, DecodeIgnore, Str, UnalignedSlice};
@@ -15,8 +15,7 @@ use crate::facet::FacetType;
 use crate::heed_codec::facet::FieldDocIdFacetCodec;
 use crate::heed_codec::CboRoaringBitmapCodec;
 use crate::{
-    ExternalDocumentsIds, FieldId, FieldIdMapMissingEntry, Index, Result, RoaringBitmapCodec,
-    SmallString32, BEU32,
+    ExternalDocumentsIds, FieldId, FieldIdMapMissingEntry, Index, Result, RoaringBitmapCodec, BEU32,
 };
 
 pub struct DeleteDocuments<'t, 'u, 'i> {
@@ -72,7 +71,6 @@ impl std::fmt::Display for DeletionStrategy {
 pub(crate) struct DetailedDocumentDeletionResult {
     pub deleted_documents: u64,
     pub remaining_documents: u64,
-    pub soft_deletion_used: bool,
 }
 
 impl<'t, 'u, 'i> DeleteDocuments<'t, 'u, 'i> {
@@ -109,11 +107,8 @@ impl<'t, 'u, 'i> DeleteDocuments<'t, 'u, 'i> {
         Some(docid)
     }
     pub fn execute(self) -> Result<DocumentDeletionResult> {
-        let DetailedDocumentDeletionResult {
-            deleted_documents,
-            remaining_documents,
-            soft_deletion_used: _,
-        } = self.execute_inner()?;
+        let DetailedDocumentDeletionResult { deleted_documents, remaining_documents } =
+            self.execute_inner()?;
 
         Ok(DocumentDeletionResult { deleted_documents, remaining_documents })
     }
@@ -134,7 +129,6 @@ impl<'t, 'u, 'i> DeleteDocuments<'t, 'u, 'i> {
             return Ok(DetailedDocumentDeletionResult {
                 deleted_documents: 0,
                 remaining_documents: 0,
-                soft_deletion_used: false,
             });
         }
 
@@ -150,7 +144,6 @@ impl<'t, 'u, 'i> DeleteDocuments<'t, 'u, 'i> {
             return Ok(DetailedDocumentDeletionResult {
                 deleted_documents: current_documents_ids_len,
                 remaining_documents,
-                soft_deletion_used: false,
             });
         }
 
@@ -219,7 +212,6 @@ impl<'t, 'u, 'i> DeleteDocuments<'t, 'u, 'i> {
             return Ok(DetailedDocumentDeletionResult {
                 deleted_documents: self.to_delete_docids.len(),
                 remaining_documents: documents_ids.len(),
-                soft_deletion_used: true,
             });
         }
 
@@ -232,7 +224,6 @@ impl<'t, 'u, 'i> DeleteDocuments<'t, 'u, 'i> {
             exact_word_docids,
             word_prefix_docids,
             exact_word_prefix_docids,
-            docid_word_positions,
             word_pair_proximity_docids,
             field_id_word_count_docids,
             word_prefix_pair_proximity_docids,
@@ -251,23 +242,9 @@ impl<'t, 'u, 'i> DeleteDocuments<'t, 'u, 'i> {
             facet_id_is_empty_docids,
             documents,
         } = self.index;
-        // Retrieve the words contained in the documents.
-        let mut words = Vec::new();
+        // Remove from the documents database
         for docid in &self.to_delete_docids {
             documents.delete(self.wtxn, &BEU32::new(docid))?;
-
-            // We iterate through the words positions of the document id, retrieve the word and delete the positions.
-            // We create an iterator to be able to get the content and delete the key-value itself.
-            // It's faster to acquire a cursor to get and delete, as we avoid traversing the LMDB B-Tree two times but only once.
-            let mut iter = docid_word_positions.prefix_iter_mut(self.wtxn, &(docid, ""))?;
-            while let Some(result) = iter.next() {
-                let ((_docid, word), _positions) = result?;
-                // This boolean will indicate if we must remove this word from the words FST.
-                words.push((SmallString32::from(word), false));
-                // safety: we don't keep references from inside the LMDB database.
-                unsafe { iter.del_current()? };
-            }
         }
         // We acquire the current external documents ids map...
         // Note that its soft-deleted document ids field will be equal to the `to_delete_docids`
@@ -278,42 +255,27 @@ impl<'t, 'u, 'i> DeleteDocuments<'t, 'u, 'i> {
         let new_external_documents_ids = new_external_documents_ids.into_static();
         self.index.put_external_documents_ids(self.wtxn, &new_external_documents_ids)?;
 
-        // Maybe we can improve the get performance of the words
-        // if we sort the words first, keeping the LMDB pages in cache.
-        words.sort_unstable();
+        let mut words_to_keep = BTreeSet::default();
+        let mut words_to_delete = BTreeSet::default();
 
         // We iterate over the words and delete the documents ids
         // from the word docids database.
-        for (word, must_remove) in &mut words {
-            remove_from_word_docids(
-                self.wtxn,
-                word_docids,
-                word.as_str(),
-                must_remove,
-                &self.to_delete_docids,
-            )?;
-
-            remove_from_word_docids(
-                self.wtxn,
-                exact_word_docids,
-                word.as_str(),
-                must_remove,
-                &self.to_delete_docids,
-            )?;
-        }
+        remove_from_word_docids(
+            self.wtxn,
+            word_docids,
+            &self.to_delete_docids,
+            &mut words_to_keep,
+            &mut words_to_delete,
+        )?;
+        remove_from_word_docids(
+            self.wtxn,
+            exact_word_docids,
+            &self.to_delete_docids,
+            &mut words_to_keep,
+            &mut words_to_delete,
+        )?;
 
         // We construct an FST set that contains the words to delete from the words FST.
-        let words_to_delete =
-            words.iter().filter_map(
-                |(word, must_remove)| {
-                    if *must_remove {
-                        Some(word.as_str())
-                    } else {
-                        None
-                    }
-                },
-            );
-        let words_to_delete = fst::Set::from_iter(words_to_delete)?;
+        let words_to_delete = fst::Set::from_iter(words_to_delete.difference(&words_to_keep))?;
 
         let new_words_fst = {
             // We retrieve the current words FST from the database.
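The rewrite above replaces the per-word `must_remove` bookkeeping with two `BTreeSet`s filled during the docids pass, then derives the FST entries to drop as a set difference. A minimal sketch of that idea, assuming only the `fst` crate and made-up word lists; `BTreeSet::difference` yields items in ascending order, which is what `fst::Set::from_iter` requires:

use std::collections::BTreeSet;

fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Words that still appear in at least one remaining posting list.
    let mut words_to_keep: BTreeSet<String> = BTreeSet::new();
    // Words whose posting lists became empty in some database during the pass.
    let mut words_to_delete: BTreeSet<String> = BTreeSet::new();

    words_to_keep.extend(["flower", "sun"].map(String::from));
    words_to_delete.extend(["sun", "wilting"].map(String::from));

    // Only words that are in no posting list at all leave the words FST.
    let to_remove = fst::Set::from_iter(words_to_delete.difference(&words_to_keep))?;
    assert_eq!(to_remove.len(), 1); // only "wilting"
    Ok(())
}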
@@ -472,7 +434,6 @@ impl<'t, 'u, 'i> DeleteDocuments<'t, 'u, 'i> {
         Ok(DetailedDocumentDeletionResult {
             deleted_documents: self.to_delete_docids.len(),
             remaining_documents: documents_ids.len(),
-            soft_deletion_used: false,
         })
     }
 
@@ -532,23 +493,24 @@ fn remove_from_word_prefix_docids(
 fn remove_from_word_docids(
     txn: &mut heed::RwTxn,
     db: &heed::Database<Str, RoaringBitmapCodec>,
-    word: &str,
-    must_remove: &mut bool,
     to_remove: &RoaringBitmap,
+    words_to_keep: &mut BTreeSet<String>,
+    words_to_remove: &mut BTreeSet<String>,
 ) -> Result<()> {
     // We create an iterator to be able to get the content and delete the word docids.
     // It's faster to acquire a cursor to get and delete or put, as we avoid traversing
     // the LMDB B-Tree two times but only once.
-    let mut iter = db.prefix_iter_mut(txn, word)?;
-    if let Some((key, mut docids)) = iter.next().transpose()? {
-        if key == word {
-            let previous_len = docids.len();
-            docids -= to_remove;
-            if docids.is_empty() {
-                // safety: we don't keep references from inside the LMDB database.
-                unsafe { iter.del_current()? };
-                *must_remove = true;
-            } else if docids.len() != previous_len {
+    let mut iter = db.iter_mut(txn)?;
+    while let Some((key, mut docids)) = iter.next().transpose()? {
+        let previous_len = docids.len();
+        docids -= to_remove;
+        if docids.is_empty() {
+            // safety: we don't keep references from inside the LMDB database.
+            unsafe { iter.del_current()? };
+            words_to_remove.insert(key.to_owned());
+        } else {
+            words_to_keep.insert(key.to_owned());
+            if docids.len() != previous_len {
                 let key = key.to_owned();
                 // safety: we don't keep references from inside the LMDB database.
                 unsafe { iter.put_current(&key, &docids)? };
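In the new `remove_from_word_docids` above, a single cursor pass subtracts the deleted documents from every posting list and records whether each word survives. A minimal sketch of the bitmap arithmetic involved, assuming only the `roaring` crate:

use roaring::RoaringBitmap;

fn main() {
    // A word's posting list and the set of documents being deleted.
    let mut docids: RoaringBitmap = [1u32, 4, 7].into_iter().collect();
    let to_remove: RoaringBitmap = [4u32, 7].into_iter().collect();

    let previous_len = docids.len();
    docids -= &to_remove;

    // The word stays in the FST because one document still uses it,
    // but the shrunken bitmap has to be written back to LMDB.
    assert!(!docids.is_empty());
    assert_ne!(docids.len(), previous_len);
}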
@@ -627,7 +589,7 @@ mod tests {
 
     use super::*;
     use crate::index::tests::TempIndex;
-    use crate::{db_snap, Filter};
+    use crate::{db_snap, Filter, Search};
 
     fn delete_documents<'t>(
         wtxn: &mut RwTxn<'t, '_>,
@@ -1199,4 +1161,52 @@ mod tests {
             DeletionStrategy::AlwaysSoft,
         );
     }
 
+    #[test]
+    fn delete_words_exact_attributes() {
+        let index = TempIndex::new();
+
+        index
+            .update_settings(|settings| {
+                settings.set_primary_key(S("id"));
+                settings.set_searchable_fields(vec![S("text"), S("exact")]);
+                settings.set_exact_attributes(vec![S("exact")].into_iter().collect());
+            })
+            .unwrap();
+
+        index
+            .add_documents(documents!([
+                { "id": 0, "text": "hello" },
+                { "id": 1, "exact": "hello"}
+            ]))
+            .unwrap();
+        db_snap!(index, word_docids, 1, @r###"
+        hello [0, ]
+        "###);
+        db_snap!(index, exact_word_docids, 1, @r###"
+        hello [1, ]
+        "###);
+        db_snap!(index, words_fst, 1, @"300000000000000001084cfcfc2ce1000000016000000090ea47f");
+
+        let mut wtxn = index.write_txn().unwrap();
+        let deleted_internal_ids =
+            delete_documents(&mut wtxn, &index, &["1"], DeletionStrategy::AlwaysHard);
+        wtxn.commit().unwrap();
+
+        db_snap!(index, word_docids, 2, @r###"
+        hello [0, ]
+        "###);
+        db_snap!(index, exact_word_docids, 2, @"");
+        db_snap!(index, words_fst, 2, @"300000000000000001084cfcfc2ce1000000016000000090ea47f");
+
+        insta::assert_snapshot!(format!("{deleted_internal_ids:?}"), @"[1]");
+        let txn = index.read_txn().unwrap();
+        let words = index.words_fst(&txn).unwrap().into_stream().into_strs().unwrap();
+        insta::assert_snapshot!(format!("{words:?}"), @r###"["hello"]"###);
+
+        let mut s = Search::new(&txn, &index);
+        s.query("hello");
+        let crate::SearchResult { documents_ids, .. } = s.execute().unwrap();
+        insta::assert_snapshot!(format!("{documents_ids:?}"), @"[0]");
+    }
 }
@@ -1,6 +1,6 @@
 use std::collections::HashMap;
 use std::fs::File;
-use std::{cmp, io};
+use std::io;
 
 use grenad::Sorter;
 
@@ -54,11 +54,10 @@ pub fn extract_fid_word_count_docids<R: io::Read + io::Seek>(
         }
 
         for position in read_u32_ne_bytes(value) {
-            let (field_id, position) = relative_from_absolute_position(position);
-            let word_count = position as u32 + 1;
+            let (field_id, _) = relative_from_absolute_position(position);
 
             let value = document_fid_wordcount.entry(field_id as FieldId).or_insert(0);
-            *value = cmp::max(*value, word_count);
+            *value += 1;
         }
     }
 
@@ -83,7 +82,7 @@ fn drain_document_fid_wordcount_into_sorter(
     let mut key_buffer = Vec::new();
 
     for (fid, count) in document_fid_wordcount.drain() {
-        if count <= 10 {
+        if count <= 30 {
             key_buffer.clear();
             key_buffer.extend_from_slice(&fid.to_be_bytes());
             key_buffer.push(count as u8);
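The extraction change above stops deriving a field's word count from the highest relative position and instead counts the position entries themselves, while the sorter now keeps counts up to 30 instead of 10. A minimal, hypothetical miniature of the new counting step using only the standard library:

use std::collections::HashMap;

type FieldId = u16;

// Hypothetical miniature of the new counting step: each position entry seen for
// a field bumps that field's word count by one, instead of taking the maximum
// relative position.
fn count_words_per_field(positions: &[(FieldId, u32)]) -> HashMap<FieldId, u32> {
    let mut document_fid_wordcount = HashMap::new();
    for (field_id, _relative_position) in positions {
        *document_fid_wordcount.entry(*field_id).or_insert(0) += 1;
    }
    document_fid_wordcount
}

fn main() {
    // Field 0 has three words whose relative positions are sparse (0, 1, 8).
    let positions = [(0u16, 0u32), (0, 1), (0, 8), (1, 0)];
    let counts = count_words_per_field(&positions);
    assert_eq!(counts[&0], 3); // the old max-position logic would have reported 9
    assert_eq!(counts[&1], 1);
}

Only pairs with `count <= 30` are then pushed to the sorter, matching the raised threshold in the hunk above.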
@@ -325,8 +325,6 @@ fn send_and_extract_flattened_documents_data(
                 // send docid_word_positions_chunk to DB writer
                 let docid_word_positions_chunk =
                     unsafe { as_cloneable_grenad(&docid_word_positions_chunk)? };
-                let _ = lmdb_writer_sx
-                    .send(Ok(TypedChunk::DocidWordPositions(docid_word_positions_chunk.clone())));
 
                 let _ =
                     lmdb_writer_sx.send(Ok(TypedChunk::ScriptLanguageDocids(script_language_pair)));
@@ -2,7 +2,7 @@ use std::sync::Arc;
 
 use memmap2::Mmap;
 
-/// Wrapper around Mmap allowing to virtualy clone grenad-chunks
+/// Wrapper around Mmap allowing to virtually clone grenad-chunks
 /// in a parallel process like the indexing.
 #[derive(Debug, Clone)]
 pub struct ClonableMmap {
@@ -4,7 +4,6 @@ use std::result::Result as StdResult;
 
 use roaring::RoaringBitmap;
 
-use super::read_u32_ne_bytes;
 use crate::heed_codec::CboRoaringBitmapCodec;
 use crate::update::index_documents::transform::Operation;
 use crate::Result;
@@ -22,10 +21,6 @@ pub fn concat_u32s_array<'a>(_key: &[u8], values: &[Cow<'a, [u8]>]) -> Result<Co
     }
 }
 
-pub fn roaring_bitmap_from_u32s_array(slice: &[u8]) -> RoaringBitmap {
-    read_u32_ne_bytes(slice).collect()
-}
-
 pub fn serialize_roaring_bitmap(bitmap: &RoaringBitmap, buffer: &mut Vec<u8>) -> io::Result<()> {
     buffer.clear();
     buffer.reserve(bitmap.serialized_size());
@@ -14,8 +14,8 @@ pub use grenad_helpers::{
 };
 pub use merge_functions::{
     concat_u32s_array, keep_first, keep_latest_obkv, merge_cbo_roaring_bitmaps,
-    merge_obkvs_and_operations, merge_roaring_bitmaps, merge_two_obkvs,
-    roaring_bitmap_from_u32s_array, serialize_roaring_bitmap, MergeFn,
+    merge_obkvs_and_operations, merge_roaring_bitmaps, merge_two_obkvs, serialize_roaring_bitmap,
+    MergeFn,
 };
 
 use crate::MAX_WORD_LENGTH;
@@ -236,7 +236,7 @@ where
             primary_key,
             fields_ids_map,
             field_distribution,
-            mut external_documents_ids,
+            new_external_documents_ids,
             new_documents_ids,
             replaced_documents_ids,
             documents_count,
@@ -363,9 +363,6 @@ where
             deletion_builder.delete_documents(&replaced_documents_ids);
             let deleted_documents_result = deletion_builder.execute_inner()?;
             debug!("{} documents actually deleted", deleted_documents_result.deleted_documents);
-            if !deleted_documents_result.soft_deletion_used {
-                external_documents_ids.delete_soft_deleted_documents_ids_from_fsts()?;
-            }
         }
 
         let index_documents_ids = self.index.documents_ids(self.wtxn)?;
@@ -445,6 +442,9 @@ where
         self.index.put_primary_key(self.wtxn, &primary_key)?;
 
         // We write the external documents ids into the main database.
+        let mut external_documents_ids = self.index.external_documents_ids(self.wtxn)?;
+        external_documents_ids.insert_ids(&new_external_documents_ids)?;
+        let external_documents_ids = external_documents_ids.into_static();
         self.index.put_external_documents_ids(self.wtxn, &external_documents_ids)?;
 
         let all_documents_ids = index_documents_ids | new_documents_ids;
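With the hunk above, the transform only hands back the batch's `new_external_documents_ids` as an `fst::Map`, and the merge into the map stored in the index happens here, at write time, via `insert_ids`. A minimal sketch of building such a per-batch map, assuming only the `fst` crate (the keys and ids are made up):

use fst::Map;

fn main() -> Result<(), Box<dyn std::error::Error>> {
    // External (user-facing) ids mapped to freshly allocated internal docids.
    // `from_iter` requires the keys in lexicographic order.
    let new_external_documents_ids = Map::from_iter([("0", 1u64), ("1", 2u64)])?;

    assert_eq!(new_external_documents_ids.get("1"), Some(2));
    assert_eq!(new_external_documents_ids.len(), 2);
    Ok(())
}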
@@ -2471,11 +2471,11 @@ mod tests {
                 {
                     "id": 3,
                     "text": "a a a a a a a a a a a a a a a a a
                     a a a a a a a a a a a a a a a a a a a a a a a a a a
                     a a a a a a a a a a a a a a a a a a a a a a a a a a
                     a a a a a a a a a a a a a a a a a a a a a a a a a a
                     a a a a a a a a a a a a a a a a a a a a a a a a a a
                     a a a a a a a a a a a a a a a a a a a a a a a a a a
                     a a a a a a a a a a a a a a a a a a a a a "
                 }
             ]))
@@ -2513,6 +2513,171 @@ mod tests {
 
         db_snap!(index, word_fid_docids, 3, @"4c2e2a1832e5802796edc1638136d933");
         db_snap!(index, word_position_docids, 3, @"74f556b91d161d997a89468b4da1cb8f");
-        db_snap!(index, docid_word_positions, 3, @"5287245332627675740b28bd46e1cde1");
     }
+
+    #[test]
+    fn reproduce_the_bug() {
+        /*
+        [milli/examples/fuzz.rs:69] &batches = [
+            Batch(
+                [
+                    AddDoc(
+                        { "id": 1, "doggo": "bernese" }, => internal 0
+                    ),
+                ],
+            ),
+            Batch(
+                [
+                    DeleteDoc(
+                        1, => delete internal 0
+                    ),
+                    AddDoc(
+                        { "id": 0, "catto": "jorts" }, => internal 1
+                    ),
+                ],
+            ),
+            Batch(
+                [
+                    AddDoc(
+                        { "id": 1, "catto": "jorts" }, => internal 2
+                    ),
+                ],
+            ),
+        ]
+        */
+        let mut index = TempIndex::new();
+        index.index_documents_config.deletion_strategy = DeletionStrategy::AlwaysHard;
+
+        // START OF BATCH
+
+        println!("--- ENTERING BATCH 1");
+
+        let mut wtxn = index.write_txn().unwrap();
+
+        let builder = IndexDocuments::new(
+            &mut wtxn,
+            &index,
+            &index.indexer_config,
+            index.index_documents_config.clone(),
+            |_| (),
+            || false,
+        )
+        .unwrap();
+
+        // OP
+
+        let documents = documents!([
+            { "id": 1, "doggo": "bernese" },
+        ]);
+        let (builder, added) = builder.add_documents(documents).unwrap();
+        insta::assert_display_snapshot!(added.unwrap(), @"1");
+
+        // FINISHING
+        let addition = builder.execute().unwrap();
+        insta::assert_debug_snapshot!(addition, @r###"
+        DocumentAdditionResult {
+            indexed_documents: 1,
+            number_of_documents: 1,
+        }
+        "###);
+        wtxn.commit().unwrap();
+
+        db_snap!(index, documents, @r###"
+        {"id":1,"doggo":"bernese"}
+        "###);
+        db_snap!(index, external_documents_ids, @r###"
+        soft:
+        hard:
+        1 0
+        "###);
+
+        // A first batch of documents has been inserted
+
+        // BATCH 2
+
+        println!("--- ENTERING BATCH 2");
+
+        let mut wtxn = index.write_txn().unwrap();
+
+        let builder = IndexDocuments::new(
+            &mut wtxn,
+            &index,
+            &index.indexer_config,
+            index.index_documents_config.clone(),
+            |_| (),
+            || false,
+        )
+        .unwrap();
+
+        let (builder, removed) = builder.remove_documents(vec![S("1")]).unwrap();
+        insta::assert_display_snapshot!(removed.unwrap(), @"1");
+
+        let documents = documents!([
+            { "id": 0, "catto": "jorts" },
+        ]);
+        let (builder, added) = builder.add_documents(documents).unwrap();
+        insta::assert_display_snapshot!(added.unwrap(), @"1");
+
+        let addition = builder.execute().unwrap();
+        insta::assert_debug_snapshot!(addition, @r###"
+        DocumentAdditionResult {
+            indexed_documents: 1,
+            number_of_documents: 1,
+        }
+        "###);
+        wtxn.commit().unwrap();
+
+        db_snap!(index, documents, @r###"
+        {"id":0,"catto":"jorts"}
+        "###);
+
+        db_snap!(index, external_documents_ids, @r###"
+        soft:
+        hard:
+        0 1
+        "###);
+
+        db_snap!(index, soft_deleted_documents_ids, @"[]");
+
+        // BATCH 3
+
+        println!("--- ENTERING BATCH 3");
+
+        let mut wtxn = index.write_txn().unwrap();
+
+        let builder = IndexDocuments::new(
+            &mut wtxn,
+            &index,
+            &index.indexer_config,
+            index.index_documents_config.clone(),
+            |_| (),
+            || false,
+        )
+        .unwrap();
+
+        let documents = documents!([
+            { "id": 1, "catto": "jorts" },
+        ]);
+        let (builder, added) = builder.add_documents(documents).unwrap();
+        insta::assert_display_snapshot!(added.unwrap(), @"1");
+
+        let addition = builder.execute().unwrap();
+        insta::assert_debug_snapshot!(addition, @r###"
+        DocumentAdditionResult {
+            indexed_documents: 1,
+            number_of_documents: 2,
+        }
+        "###);
+        wtxn.commit().unwrap();
+
+        db_snap!(index, documents, @r###"
+        {"id":1,"catto":"jorts"}
+        {"id":0,"catto":"jorts"}
+        "###);
+
+        // Ensuring all the returned IDs actually exists
+        let rtxn = index.read_txn().unwrap();
+        let res = index.search(&rtxn).execute().unwrap();
+        index.documents(&rtxn, res.documents_ids).unwrap();
+    }
 }
@@ -21,15 +21,14 @@ use crate::error::{Error, InternalError, UserError};
 use crate::index::{db_name, main_key};
 use crate::update::{AvailableDocumentsIds, ClearDocuments, UpdateIndexingStep};
 use crate::{
-    ExternalDocumentsIds, FieldDistribution, FieldId, FieldIdMapMissingEntry, FieldsIdsMap, Index,
-    Result, BEU32,
+    FieldDistribution, FieldId, FieldIdMapMissingEntry, FieldsIdsMap, Index, Result, BEU32,
 };
 
 pub struct TransformOutput {
     pub primary_key: String,
     pub fields_ids_map: FieldsIdsMap,
     pub field_distribution: FieldDistribution,
-    pub external_documents_ids: ExternalDocumentsIds<'static>,
+    pub new_external_documents_ids: fst::Map<Cow<'static, [u8]>>,
     pub new_documents_ids: RoaringBitmap,
     pub replaced_documents_ids: RoaringBitmap,
     pub documents_count: usize,
@@ -568,8 +567,6 @@ impl<'a, 'i> Transform<'a, 'i> {
             }))?
             .to_string();
 
-        let mut external_documents_ids = self.index.external_documents_ids(wtxn)?;
-
         // We create a final writer to write the new documents in order from the sorter.
         let mut writer = create_writer(
             self.indexer_settings.chunk_compression_type,
@@ -651,13 +648,12 @@ impl<'a, 'i> Transform<'a, 'i> {
             fst_new_external_documents_ids_builder.insert(key, value)
         })?;
         let new_external_documents_ids = fst_new_external_documents_ids_builder.into_map();
-        external_documents_ids.insert_ids(&new_external_documents_ids)?;
 
         Ok(TransformOutput {
             primary_key,
             fields_ids_map: self.fields_ids_map,
             field_distribution,
-            external_documents_ids: external_documents_ids.into_static(),
+            new_external_documents_ids: new_external_documents_ids.map_data(Cow::Owned).unwrap(),
             new_documents_ids: self.new_documents_ids,
             replaced_documents_ids: self.replaced_documents_ids,
             documents_count: self.documents_count,
@@ -691,7 +687,8 @@ impl<'a, 'i> Transform<'a, 'i> {
         let new_external_documents_ids = {
             let mut external_documents_ids = self.index.external_documents_ids(wtxn)?;
             external_documents_ids.delete_soft_deleted_documents_ids_from_fsts()?;
-            external_documents_ids
+            // This call should be free and can't fail since the previous method merged both fsts.
+            external_documents_ids.into_static().to_fst()?.into_owned()
         };
 
         let documents_ids = self.index.documents_ids(wtxn)?;
@@ -776,7 +773,7 @@ impl<'a, 'i> Transform<'a, 'i> {
             primary_key,
             fields_ids_map: new_fields_ids_map,
             field_distribution,
-            external_documents_ids: new_external_documents_ids.into_static(),
+            new_external_documents_ids,
             new_documents_ids: documents_ids,
             replaced_documents_ids: RoaringBitmap::default(),
             documents_count,
@@ -7,24 +7,19 @@ use std::io;
 use charabia::{Language, Script};
 use grenad::MergerBuilder;
 use heed::types::ByteSlice;
-use heed::{BytesDecode, RwTxn};
+use heed::RwTxn;
 use roaring::RoaringBitmap;
 
 use super::helpers::{
-    self, merge_ignore_values, roaring_bitmap_from_u32s_array, serialize_roaring_bitmap,
-    valid_lmdb_key, CursorClonableMmap,
+    self, merge_ignore_values, serialize_roaring_bitmap, valid_lmdb_key, CursorClonableMmap,
 };
 use super::{ClonableMmap, MergeFn};
 use crate::facet::FacetType;
 use crate::update::facet::FacetsUpdate;
 use crate::update::index_documents::helpers::as_cloneable_grenad;
-use crate::{
-    lat_lng_to_xyz, BoRoaringBitmapCodec, CboRoaringBitmapCodec, DocumentId, GeoPoint, Index,
-    Result,
-};
+use crate::{lat_lng_to_xyz, CboRoaringBitmapCodec, DocumentId, GeoPoint, Index, Result};
 
 pub(crate) enum TypedChunk {
-    DocidWordPositions(grenad::Reader<CursorClonableMmap>),
     FieldIdDocidFacetStrings(grenad::Reader<CursorClonableMmap>),
     FieldIdDocidFacetNumbers(grenad::Reader<CursorClonableMmap>),
     Documents(grenad::Reader<CursorClonableMmap>),
@@ -56,29 +51,6 @@ pub(crate) fn write_typed_chunk_into_index(
 ) -> Result<(RoaringBitmap, bool)> {
     let mut is_merged_database = false;
     match typed_chunk {
-        TypedChunk::DocidWordPositions(docid_word_positions_iter) => {
-            write_entries_into_database(
-                docid_word_positions_iter,
-                &index.docid_word_positions,
-                wtxn,
-                index_is_empty,
-                |value, buffer| {
-                    // ensure that values are unique and ordered
-                    let positions = roaring_bitmap_from_u32s_array(value);
-                    BoRoaringBitmapCodec::serialize_into(&positions, buffer);
-                    Ok(buffer)
-                },
-                |new_values, db_values, buffer| {
-                    let new_values = roaring_bitmap_from_u32s_array(new_values);
-                    let positions = match BoRoaringBitmapCodec::bytes_decode(db_values) {
-                        Some(db_values) => new_values | db_values,
-                        None => new_values, // should not happen
-                    };
-                    BoRoaringBitmapCodec::serialize_into(&positions, buffer);
-                    Ok(())
-                },
-            )?;
-        }
         TypedChunk::Documents(obkv_documents_iter) => {
            let mut cursor = obkv_documents_iter.into_cursor()?;
            while let Some((key, value)) = cursor.move_on_next()? {