Mirror of https://github.com/meilisearch/meilisearch.git, synced 2025-07-20 13:30:38 +00:00

Compare commits
9 commits: index-stat ... prototype-

Commits (SHA1):
3fa40a3f9c
5675847a5f
1c77117d02
26dc415d9e
89a4e7cee4
f2040e50b2
2b62e85622
c13e3d5c8a
73a8018eb1
@@ -2,3 +2,4 @@ target
Dockerfile
.dockerignore
.gitignore
**/.git
47 .github/scripts/check-release.sh (vendored)
@@ -1,41 +1,24 @@
#!/usr/bin/env bash
set -eu -o pipefail
#!/bin/bash

check_tag() {
    local expected=$1
    local actual=$2
    local filename=$3

    if [[ $actual != $expected ]]; then
        echo >&2 "Error: the current tag does not match the version in $filename: found $actual, expected $expected"
        return 1
    fi
# check_tag $current_tag $file_tag $file_name
function check_tag {
    if [[ "$1" != "$2" ]]; then
        echo "Error: the current tag does not match the version in Cargo.toml: found $2 - expected $1"
        ret=1
    fi
}

read_version() {
    grep '^version = ' | cut -d \" -f 2
}

if [[ -z "${GITHUB_REF:-}" ]]; then
    echo >&2 "Error: GITHUB_REF is not set"
    exit 1
fi

if [[ ! "$GITHUB_REF" =~ ^refs/tags/v[0-9]+\.[0-9]+\.[0-9]+(-[a-z0-9]+)?$ ]]; then
    echo >&2 "Error: GITHUB_REF is not a valid tag: $GITHUB_REF"
    exit 1
fi

current_tag=${GITHUB_REF#refs/tags/v}
ret=0
current_tag=${GITHUB_REF#'refs/tags/v'}

toml_tag="$(cat Cargo.toml | read_version)"
check_tag "$current_tag" "$toml_tag" Cargo.toml || ret=1
file_tag="$(grep '^version = ' Cargo.toml | cut -d '=' -f 2 | tr -d '"' | tr -d ' ')"
check_tag $current_tag $file_tag

lock_tag=$(grep -A 1 '^name = "meilisearch-auth"' Cargo.lock | read_version)
check_tag "$current_tag" "$lock_tag" Cargo.lock || ret=1
lock_file='Cargo.lock'
lock_tag=$(grep -A 1 'name = "meilisearch-auth"' $lock_file | grep version | cut -d '=' -f 2 | tr -d '"' | tr -d ' ')
check_tag $current_tag $lock_tag $lock_file

if (( ret == 0 )); then
    echo 'OK'
if [[ "$ret" -eq 0 ]] ; then
    echo 'OK'
fi
exit $ret
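For readers skimming the script change above: both versions strip the `refs/tags/v` prefix from the tag and compare it against the `version = "…"` lines of Cargo.toml and Cargo.lock. The following is an illustrative Rust sketch of that same check; it is not part of the repository, and the manifest string and tag value are made up for the example.

```rust
// Hypothetical port of the release check: read `version = "…"` from a manifest
// and compare it with the tag extracted from GITHUB_REF.
fn read_version(manifest: &str) -> Option<&str> {
    manifest
        .lines()
        .find(|line| line.starts_with("version = "))
        .and_then(|line| line.split('"').nth(1))
}

fn check_tag(expected: &str, actual: &str, filename: &str) -> bool {
    if actual != expected {
        eprintln!("Error: the current tag does not match the version in {filename}: found {actual}, expected {expected}");
        return false;
    }
    true
}

fn main() {
    // Sample values for illustration only.
    let github_ref = "refs/tags/v1.2.0";
    let current_tag = github_ref.trim_start_matches("refs/tags/v");

    let cargo_toml = "[package]\nname = \"meilisearch\"\nversion = \"1.2.0\"\n";
    let toml_tag = read_version(cargo_toml).unwrap_or_default();

    let ok = check_tag(current_tag, toml_tag, "Cargo.toml");
    std::process::exit(if ok { 0 } else { 1 });
}
```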
24 .github/workflows/fuzzer-indexing.yml (vendored)
@@ -1,24 +0,0 @@
name: Run the indexing fuzzer

on:
  push:
    branches:
      - main

jobs:
  fuzz:
    name: Setup the action
    runs-on: ubuntu-latest
    timeout-minutes: 4320 # 72h
    steps:
      - uses: actions/checkout@v3
      - uses: actions-rs/toolchain@v1
        with:
          profile: minimal
          toolchain: stable
          override: true

      # Run benchmarks
      - name: Run the fuzzer
        run: |
          cargo run --release --bin fuzz-indexing
2 .github/workflows/publish-apt-brew-pkg.yml (vendored)
@@ -35,7 +35,7 @@ jobs:
      - name: Build deb package
        run: cargo deb -p meilisearch -o target/debian/meilisearch.deb
      - name: Upload debian pkg to release
        uses: svenstaro/upload-release-action@2.6.1
        uses: svenstaro/upload-release-action@2.5.0
        with:
          repo_token: ${{ secrets.MEILI_BOT_GH_PAT }}
          file: target/debian/meilisearch.deb
8 .github/workflows/publish-binaries.yml (vendored)
@@ -54,7 +54,7 @@ jobs:
      # No need to upload binaries for dry run (cron)
      - name: Upload binaries to release
        if: github.event_name == 'release'
        uses: svenstaro/upload-release-action@2.6.1
        uses: svenstaro/upload-release-action@2.5.0
        with:
          repo_token: ${{ secrets.MEILI_BOT_GH_PAT }}
          file: target/release/meilisearch
@@ -87,7 +87,7 @@ jobs:
      # No need to upload binaries for dry run (cron)
      - name: Upload binaries to release
        if: github.event_name == 'release'
        uses: svenstaro/upload-release-action@2.6.1
        uses: svenstaro/upload-release-action@2.5.0
        with:
          repo_token: ${{ secrets.MEILI_BOT_GH_PAT }}
          file: target/release/${{ matrix.artifact_name }}
@@ -121,7 +121,7 @@ jobs:
      - name: Upload the binary to release
        # No need to upload binaries for dry run (cron)
        if: github.event_name == 'release'
        uses: svenstaro/upload-release-action@2.6.1
        uses: svenstaro/upload-release-action@2.5.0
        with:
          repo_token: ${{ secrets.MEILI_BOT_GH_PAT }}
          file: target/${{ matrix.target }}/release/meilisearch
@@ -183,7 +183,7 @@ jobs:
      - name: Upload the binary to release
        # No need to upload binaries for dry run (cron)
        if: github.event_name == 'release'
        uses: svenstaro/upload-release-action@2.6.1
        uses: svenstaro/upload-release-action@2.5.0
        with:
          repo_token: ${{ secrets.MEILI_BOT_GH_PAT }}
          file: target/${{ matrix.target }}/release/meilisearch
7 .github/workflows/publish-docker-images.yml (vendored)
@@ -58,9 +58,13 @@ jobs:

      - name: Set up QEMU
        uses: docker/setup-qemu-action@v2
        with:
          platforms: linux/amd64,linux/arm64

      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v2
        with:
          platforms: linux/amd64,linux/arm64

      - name: Login to Docker Hub
        uses: docker/login-action@v2
@@ -88,10 +92,13 @@ jobs:
          push: true
          platforms: linux/amd64,linux/arm64
          tags: ${{ steps.meta.outputs.tags }}
          builder: ${{ steps.buildx.outputs.name }}
          build-args: |
            COMMIT_SHA=${{ github.sha }}
            COMMIT_DATE=${{ steps.build-metadata.outputs.date }}
            GIT_TAG=${{ github.ref_name }}
          cache-from: type=gha
          cache-to: type=gha,mode=max

      # /!\ Don't touch this without checking with Cloud team
      - name: Send CI information to Cloud team
42 .github/workflows/sdks-tests.yml (vendored)
@@ -3,11 +3,6 @@ name: SDKs tests

on:
  workflow_dispatch:
    inputs:
      docker_image:
        description: 'The Meilisearch Docker image used'
        required: false
        default: nightly
  schedule:
    - cron: "0 6 * * MON" # Every Monday at 6:00AM

@@ -16,28 +11,13 @@ env:
  MEILI_NO_ANALYTICS: 'true'

jobs:
  define-docker-image:
    runs-on: ubuntu-latest
    outputs:
      docker-image: ${{ steps.define-image.outputs.docker-image }}
    steps:
      - uses: actions/checkout@v3
      - name: Define the Docker image we need to use
        id: define-image
        run: |
          event=${{ github.event_name }}
          echo "docker-image=nightly" >> $GITHUB_OUTPUT
          if [[ $event == 'workflow_dispatch' ]]; then
            echo "docker-image=${{ github.event.inputs.docker_image }}" >> $GITHUB_OUTPUT
          fi

  meilisearch-js-tests:
    needs: define-docker-image
    name: JS SDK tests
    runs-on: ubuntu-latest
    services:
      meilisearch:
        image: getmeili/meilisearch:${{ needs.define-docker-image.outputs.docker-image }}
        image: getmeili/meilisearch:nightly
        env:
          MEILI_MASTER_KEY: ${{ env.MEILI_MASTER_KEY }}
          MEILI_NO_ANALYTICS: ${{ env.MEILI_NO_ANALYTICS }}
@@ -67,12 +47,11 @@ jobs:
        run: yarn test:env:browser

  instant-meilisearch-tests:
    needs: define-docker-image
    name: instant-meilisearch tests
    runs-on: ubuntu-latest
    services:
      meilisearch:
        image: getmeili/meilisearch:${{ needs.define-docker-image.outputs.docker-image }}
        image: getmeili/meilisearch:nightly
        env:
          MEILI_MASTER_KEY: ${{ env.MEILI_MASTER_KEY }}
          MEILI_NO_ANALYTICS: ${{ env.MEILI_NO_ANALYTICS }}
@@ -94,12 +73,11 @@ jobs:
        run: yarn build

  meilisearch-php-tests:
    needs: define-docker-image
    name: PHP SDK tests
    runs-on: ubuntu-latest
    services:
      meilisearch:
        image: getmeili/meilisearch:${{ needs.define-docker-image.outputs.docker-image }}
        image: getmeili/meilisearch:nightly
        env:
          MEILI_MASTER_KEY: ${{ env.MEILI_MASTER_KEY }}
          MEILI_NO_ANALYTICS: ${{ env.MEILI_NO_ANALYTICS }}
@@ -125,12 +103,11 @@ jobs:
          composer remove --dev guzzlehttp/guzzle http-interop/http-factory-guzzle

  meilisearch-python-tests:
    needs: define-docker-image
    name: Python SDK tests
    runs-on: ubuntu-latest
    services:
      meilisearch:
        image: getmeili/meilisearch:${{ needs.define-docker-image.outputs.docker-image }}
        image: getmeili/meilisearch:nightly
        env:
          MEILI_MASTER_KEY: ${{ env.MEILI_MASTER_KEY }}
          MEILI_NO_ANALYTICS: ${{ env.MEILI_NO_ANALYTICS }}
@@ -150,12 +127,11 @@ jobs:
        run: pipenv run pytest

  meilisearch-go-tests:
    needs: define-docker-image
    name: Go SDK tests
    runs-on: ubuntu-latest
    services:
      meilisearch:
        image: getmeili/meilisearch:${{ needs.define-docker-image.outputs.docker-image }}
        image: getmeili/meilisearch:nightly
        env:
          MEILI_MASTER_KEY: ${{ env.MEILI_MASTER_KEY }}
          MEILI_NO_ANALYTICS: ${{ env.MEILI_NO_ANALYTICS }}
@@ -163,7 +139,7 @@ jobs:
        - '7700:7700'
    steps:
      - name: Set up Go
        uses: actions/setup-go@v4
        uses: actions/setup-go@v3
        with:
          go-version: stable
      - uses: actions/checkout@v3
@@ -180,12 +156,11 @@ jobs:
        run: go test -v ./...

  meilisearch-ruby-tests:
    needs: define-docker-image
    name: Ruby SDK tests
    runs-on: ubuntu-latest
    services:
      meilisearch:
        image: getmeili/meilisearch:${{ needs.define-docker-image.outputs.docker-image }}
        image: getmeili/meilisearch:nightly
        env:
          MEILI_MASTER_KEY: ${{ env.MEILI_MASTER_KEY }}
          MEILI_NO_ANALYTICS: ${{ env.MEILI_NO_ANALYTICS }}
@@ -205,12 +180,11 @@ jobs:
        run: bundle exec rspec

  meilisearch-rust-tests:
    needs: define-docker-image
    name: Rust SDK tests
    runs-on: ubuntu-latest
    services:
      meilisearch:
        image: getmeili/meilisearch:${{ needs.define-docker-image.outputs.docker-image }}
        image: getmeili/meilisearch:nightly
        env:
          MEILI_MASTER_KEY: ${{ env.MEILI_MASTER_KEY }}
          MEILI_NO_ANALYTICS: ${{ env.MEILI_NO_ANALYTICS }}
33 .github/workflows/test-suite.yml (vendored)
@@ -43,7 +43,7 @@ jobs:
          toolchain: nightly
          override: true
      - name: Cache dependencies
        uses: Swatinem/rust-cache@v2.4.0
        uses: Swatinem/rust-cache@v2.2.1
      - name: Run cargo check without any default features
        uses: actions-rs/cargo@v1
        with:
@@ -65,7 +65,7 @@ jobs:
    steps:
      - uses: actions/checkout@v3
      - name: Cache dependencies
        uses: Swatinem/rust-cache@v2.4.0
        uses: Swatinem/rust-cache@v2.2.1
      - name: Run cargo check without any default features
        uses: actions-rs/cargo@v1
        with:
@@ -105,29 +105,6 @@ jobs:
          command: test
          args: --workspace --locked --release --all-features

  test-disabled-tokenization:
    name: Test disabled tokenization
    runs-on: ubuntu-latest
    container:
      image: ubuntu:18.04
    if: github.event_name == 'schedule'
    steps:
      - uses: actions/checkout@v3
      - name: Install needed dependencies
        run: |
          apt-get update
          apt-get install --assume-yes build-essential curl
      - uses: actions-rs/toolchain@v1
        with:
          toolchain: stable
          override: true
      - name: Run cargo tree without default features and check lindera is not present
        run: |
          cargo tree -f '{p} {f}' -e normal --no-default-features | grep lindera -vqz
      - name: Run cargo tree with default features and check lindera is pressent
        run: |
          cargo tree -f '{p} {f}' -e normal | grep lindera -qz

  # We run tests in debug also, to make sure that the debug_assertions are hit
  test-debug:
    name: Run tests in debug
@@ -146,7 +123,7 @@ jobs:
          toolchain: stable
          override: true
      - name: Cache dependencies
        uses: Swatinem/rust-cache@v2.4.0
        uses: Swatinem/rust-cache@v2.2.1
      - name: Run tests in debug
        uses: actions-rs/cargo@v1
        with:
@@ -165,7 +142,7 @@ jobs:
          override: true
          components: clippy
      - name: Cache dependencies
        uses: Swatinem/rust-cache@v2.4.0
        uses: Swatinem/rust-cache@v2.2.1
      - name: Run cargo clippy
        uses: actions-rs/cargo@v1
        with:
@@ -184,7 +161,7 @@ jobs:
          override: true
          components: rustfmt
      - name: Cache dependencies
        uses: Swatinem/rust-cache@v2.4.0
        uses: Swatinem/rust-cache@v2.2.1
      - name: Run cargo fmt
        # Since we never ran the `build.rs` script in the benchmark directory we are missing one auto-generated import file.
        # Since we want to trigger (and fail) this action as fast as possible, instead of building the benchmark crate
755 Cargo.lock (generated): file diff suppressed because it is too large
@@ -10,12 +10,10 @@ members = [
    "file-store",
    "permissive-json-pointer",
    "milli",
    "index-stats",
    "filter-parser",
    "flatten-serde-json",
    "json-depth-checker",
    "benchmarks",
    "fuzzers",
    "benchmarks"
]

[workspace.package]
@@ -1,3 +1,4 @@
# syntax=docker/dockerfile:1.4
# Compile
FROM rust:alpine3.16 AS compiler

@@ -11,7 +12,7 @@ ARG GIT_TAG
ENV VERGEN_GIT_SHA=${COMMIT_SHA} VERGEN_GIT_COMMIT_TIMESTAMP=${COMMIT_DATE} VERGEN_GIT_SEMVER_LIGHTWEIGHT=${GIT_TAG}
ENV RUSTFLAGS="-C target-feature=-crt-static"

COPY . .
COPY --link . .
RUN set -eux; \
    apkArch="$(apk --print-arch)"; \
    if [ "$apkArch" = "aarch64" ]; then \
@@ -30,7 +31,7 @@ RUN apk update --quiet \

# add meilisearch to the `/bin` so you can run it from anywhere and it's easy
# to find.
COPY --from=compiler /meilisearch/target/release/meilisearch /bin/meilisearch
COPY --from=compiler --link /meilisearch/target/release/meilisearch /bin/meilisearch
# To stay compatible with the older version of the container (pre v0.27.0) we're
# going to symlink the meilisearch binary in the path to `/meilisearch`
RUN ln -s /bin/meilisearch /meilisearch
File diff suppressed because it is too large
@@ -1,19 +0,0 @@
global:
  scrape_interval: 15s # By default, scrape targets every 15 seconds.

  # Attach these labels to any time series or alerts when communicating with
  # external systems (federation, remote storage, Alertmanager).
  external_labels:
    monitor: 'codelab-monitor'

# A scrape configuration containing exactly one endpoint to scrape:
# Here it's Prometheus itself.
scrape_configs:
  # The job name is added as a label `job=<job_name>` to any timeseries scraped from this config.
  - job_name: 'meilisearch'

    # Override the global default and scrape targets from this job every 5 seconds.
    scrape_interval: 5s

    static_configs:
      - targets: ['localhost:7700']
54 config.toml
|
||||
# This file shows the default configuration of Meilisearch.
|
||||
# All variables are defined here: https://www.meilisearch.com/docs/learn/configuration/instance_options#environment-variables
|
||||
|
||||
db_path = "./data.ms"
|
||||
# Designates the location where database files will be created and retrieved.
|
||||
# https://www.meilisearch.com/docs/learn/configuration/instance_options#database-path
|
||||
db_path = "./data.ms"
|
||||
|
||||
env = "development"
|
||||
# Configures the instance's environment. Value must be either `production` or `development`.
|
||||
# https://www.meilisearch.com/docs/learn/configuration/instance_options#environment
|
||||
env = "development"
|
||||
|
||||
# The address on which the HTTP server will listen.
|
||||
http_addr = "localhost:7700"
|
||||
# The address on which the HTTP server will listen.
|
||||
|
||||
# master_key = "YOUR_MASTER_KEY_VALUE"
|
||||
# Sets the instance's master key, automatically protecting all routes except GET /health.
|
||||
# https://www.meilisearch.com/docs/learn/configuration/instance_options#master-key
|
||||
# master_key = "YOUR_MASTER_KEY_VALUE"
|
||||
|
||||
# no_analytics = true
|
||||
# Deactivates Meilisearch's built-in telemetry when provided.
|
||||
# Meilisearch automatically collects data from all instances that do not opt out using this flag.
|
||||
# All gathered data is used solely for the purpose of improving Meilisearch, and can be deleted at any time.
|
||||
# https://www.meilisearch.com/docs/learn/configuration/instance_options#disable-analytics
|
||||
# no_analytics = true
|
||||
|
||||
http_payload_size_limit = "100 MB"
|
||||
# Sets the maximum size of accepted payloads.
|
||||
# https://www.meilisearch.com/docs/learn/configuration/instance_options#payload-limit-size
|
||||
http_payload_size_limit = "100 MB"
|
||||
|
||||
log_level = "INFO"
|
||||
# Defines how much detail should be present in Meilisearch's logs.
|
||||
# Meilisearch currently supports six log levels, listed in order of increasing verbosity: `OFF`, `ERROR`, `WARN`, `INFO`, `DEBUG`, `TRACE`
|
||||
# https://www.meilisearch.com/docs/learn/configuration/instance_options#log-level
|
||||
log_level = "INFO"
|
||||
|
||||
# max_indexing_memory = "2 GiB"
|
||||
# Sets the maximum amount of RAM Meilisearch can use when indexing.
|
||||
# https://www.meilisearch.com/docs/learn/configuration/instance_options#max-indexing-memory
|
||||
# max_indexing_memory = "2 GiB"
|
||||
|
||||
# max_indexing_threads = 4
|
||||
# Sets the maximum number of threads Meilisearch can use during indexing.
|
||||
# https://www.meilisearch.com/docs/learn/configuration/instance_options#max-indexing-threads
|
||||
# max_indexing_threads = 4
|
||||
|
||||
#############
|
||||
### DUMPS ###
|
||||
#############
|
||||
|
||||
dump_dir = "dumps/"
|
||||
# Sets the directory where Meilisearch will create dump files.
|
||||
# https://www.meilisearch.com/docs/learn/configuration/instance_options#dump-directory
|
||||
dump_dir = "dumps/"
|
||||
|
||||
# import_dump = "./path/to/my/file.dump"
|
||||
# Imports the dump file located at the specified path. Path must point to a .dump file.
|
||||
# https://www.meilisearch.com/docs/learn/configuration/instance_options#import-dump
|
||||
# import_dump = "./path/to/my/file.dump"
|
||||
|
||||
ignore_missing_dump = false
|
||||
# Prevents Meilisearch from throwing an error when `import_dump` does not point to a valid dump file.
|
||||
# https://www.meilisearch.com/docs/learn/configuration/instance_options#ignore-missing-dump
|
||||
ignore_missing_dump = false
|
||||
|
||||
ignore_dump_if_db_exists = false
|
||||
# Prevents a Meilisearch instance with an existing database from throwing an error when using `import_dump`.
|
||||
# https://www.meilisearch.com/docs/learn/configuration/instance_options#ignore-dump-if-db-exists
|
||||
ignore_dump_if_db_exists = false
|
||||
|
||||
|
||||
#################
|
||||
### SNAPSHOTS ###
|
||||
#################
|
||||
|
||||
schedule_snapshot = false
|
||||
# Enables scheduled snapshots when true, disable when false (the default).
|
||||
# If the value is given as an integer, then enables the scheduled snapshot with the passed value as the interval
|
||||
# between each snapshot, in seconds.
|
||||
# https://www.meilisearch.com/docs/learn/configuration/instance_options#schedule-snapshot-creation
|
||||
schedule_snapshot = false
|
||||
|
||||
snapshot_dir = "snapshots/"
|
||||
# Sets the directory where Meilisearch will store snapshots.
|
||||
# https://www.meilisearch.com/docs/learn/configuration/instance_options#snapshot-destination
|
||||
snapshot_dir = "snapshots/"
|
||||
|
||||
# import_snapshot = "./path/to/my/snapshot"
|
||||
# Launches Meilisearch after importing a previously-generated snapshot at the given filepath.
|
||||
# https://www.meilisearch.com/docs/learn/configuration/instance_options#import-snapshot
|
||||
# import_snapshot = "./path/to/my/snapshot"
|
||||
|
||||
ignore_missing_snapshot = false
|
||||
# Prevents a Meilisearch instance from throwing an error when `import_snapshot` does not point to a valid snapshot file.
|
||||
# https://www.meilisearch.com/docs/learn/configuration/instance_options#ignore-missing-snapshot
|
||||
ignore_missing_snapshot = false
|
||||
|
||||
ignore_snapshot_if_db_exists = false
|
||||
# Prevents a Meilisearch instance with an existing database from throwing an error when using `import_snapshot`.
|
||||
# https://www.meilisearch.com/docs/learn/configuration/instance_options#ignore-snapshot-if-db-exists
|
||||
ignore_snapshot_if_db_exists = false
|
||||
|
||||
|
||||
###########
|
||||
### SSL ###
|
||||
###########
|
||||
|
||||
# ssl_auth_path = "./path/to/root"
|
||||
# Enables client authentication in the specified path.
|
||||
# https://www.meilisearch.com/docs/learn/configuration/instance_options#ssl-authentication-path
|
||||
# ssl_auth_path = "./path/to/root"
|
||||
|
||||
# ssl_cert_path = "./path/to/certfile"
|
||||
# Sets the server's SSL certificates.
|
||||
# https://www.meilisearch.com/docs/learn/configuration/instance_options#ssl-certificates-path
|
||||
# ssl_cert_path = "./path/to/certfile"
|
||||
|
||||
# ssl_key_path = "./path/to/private-key"
|
||||
# Sets the server's SSL key files.
|
||||
# https://www.meilisearch.com/docs/learn/configuration/instance_options#ssl-key-path
|
||||
# ssl_key_path = "./path/to/private-key"
|
||||
|
||||
# ssl_ocsp_path = "./path/to/ocsp-file"
|
||||
# Sets the server's OCSP file.
|
||||
# https://www.meilisearch.com/docs/learn/configuration/instance_options#ssl-ocsp-path
|
||||
# ssl_ocsp_path = "./path/to/ocsp-file"
|
||||
|
||||
ssl_require_auth = false
|
||||
# Makes SSL authentication mandatory.
|
||||
# https://www.meilisearch.com/docs/learn/configuration/instance_options#ssl-require-auth
|
||||
ssl_require_auth = false
|
||||
|
||||
ssl_resumption = false
|
||||
# Activates SSL session resumption.
|
||||
# https://www.meilisearch.com/docs/learn/configuration/instance_options#ssl-resumption
|
||||
ssl_resumption = false
|
||||
|
||||
ssl_tickets = false
|
||||
# Activates SSL tickets.
|
||||
# https://www.meilisearch.com/docs/learn/configuration/instance_options#ssl-tickets
|
||||
ssl_tickets = false
|
||||
|
||||
#############################
|
||||
### Experimental features ###
|
||||
#############################
|
||||
|
||||
experimental_enable_metrics = false
|
||||
# Experimental metrics feature. For more information, see: <https://github.com/meilisearch/meilisearch/discussions/3518>
|
||||
# Enables the Prometheus metrics on the `GET /metrics` endpoint.
|
||||
experimental_enable_metrics = false
|
||||
|
||||
# Experimental RAM reduction during indexing, do not use in production, see: <https://github.com/meilisearch/product/discussions/652>
|
||||
experimental_reduce_indexing_memory_usage = false
|
||||
# Experimental RAM reduction during indexing, do not use in production, see: <https://github.com/meilisearch/product/discussions/652>
|
||||
|
@@ -358,6 +358,7 @@ impl<T> From<v5::Settings<T>> for v6::Settings<v6::Unchecked> {
            faceting: match settings.faceting {
                v5::Setting::Set(faceting) => v6::Setting::Set(v6::FacetingSettings {
                    max_values_per_facet: faceting.max_values_per_facet.into(),
                    sort_facet_values_by: v6::Setting::NotSet,
                }),
                v5::Setting::Reset => v6::Setting::Reset,
                v5::Setting::NotSet => v6::Setting::NotSet,
@@ -1,20 +0,0 @@
[package]
name = "fuzzers"
publish = false

version.workspace = true
authors.workspace = true
description.workspace = true
homepage.workspace = true
readme.workspace = true
edition.workspace = true
license.workspace = true

[dependencies]
arbitrary = { version = "1.3.0", features = ["derive"] }
clap = { version = "4.3.0", features = ["derive"] }
fastrand = "1.9.0"
milli = { path = "../milli" }
serde = { version = "1.0.160", features = ["derive"] }
serde_json = { version = "1.0.95", features = ["preserve_order"] }
tempfile = "3.5.0"
@@ -1,3 +0,0 @@
# Fuzzers

The purpose of this crate is to contains all the handmade "fuzzer" we may need.
@ -1,152 +0,0 @@
|
||||
use std::num::NonZeroUsize;
|
||||
use std::path::PathBuf;
|
||||
use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
|
||||
use std::time::Duration;
|
||||
|
||||
use arbitrary::{Arbitrary, Unstructured};
|
||||
use clap::Parser;
|
||||
use fuzzers::Operation;
|
||||
use milli::heed::EnvOpenOptions;
|
||||
use milli::update::{IndexDocuments, IndexDocumentsConfig, IndexerConfig};
|
||||
use milli::Index;
|
||||
use tempfile::TempDir;
|
||||
|
||||
#[derive(Debug, Arbitrary)]
|
||||
struct Batch([Operation; 5]);
|
||||
|
||||
#[derive(Debug, Clone, Parser)]
|
||||
struct Opt {
|
||||
/// The number of fuzzer to run in parallel.
|
||||
#[clap(long)]
|
||||
par: Option<NonZeroUsize>,
|
||||
// We need to put a lot of newlines in the following documentation or else everything gets collapsed on one line
|
||||
/// The path in which the databases will be created.
|
||||
/// Using a ramdisk is recommended.
|
||||
///
|
||||
/// Linux:
|
||||
///
|
||||
/// sudo mount -t tmpfs -o size=2g tmpfs ramdisk # to create it
|
||||
///
|
||||
/// sudo umount ramdisk # to remove it
|
||||
///
|
||||
/// MacOS:
|
||||
///
|
||||
/// diskutil erasevolume HFS+ 'RAM Disk' `hdiutil attach -nobrowse -nomount ram://4194304 # create it
|
||||
///
|
||||
/// hdiutil detach /dev/:the_disk
|
||||
#[clap(long)]
|
||||
path: Option<PathBuf>,
|
||||
}
|
||||
|
||||
fn main() {
|
||||
let opt = Opt::parse();
|
||||
let progression: &'static AtomicUsize = Box::leak(Box::new(AtomicUsize::new(0)));
|
||||
let stop: &'static AtomicBool = Box::leak(Box::new(AtomicBool::new(false)));
|
||||
|
||||
let par = opt.par.unwrap_or_else(|| std::thread::available_parallelism().unwrap()).get();
|
||||
let mut handles = Vec::with_capacity(par);
|
||||
|
||||
for _ in 0..par {
|
||||
let opt = opt.clone();
|
||||
|
||||
let handle = std::thread::spawn(move || {
|
||||
let mut options = EnvOpenOptions::new();
|
||||
options.map_size(1024 * 1024 * 1024 * 1024);
|
||||
let tempdir = match opt.path {
|
||||
Some(path) => TempDir::new_in(path).unwrap(),
|
||||
None => TempDir::new().unwrap(),
|
||||
};
|
||||
let index = Index::new(options, tempdir.path()).unwrap();
|
||||
let indexer_config = IndexerConfig::default();
|
||||
let index_documents_config = IndexDocumentsConfig::default();
|
||||
|
||||
std::thread::scope(|s| {
|
||||
loop {
|
||||
if stop.load(Ordering::Relaxed) {
|
||||
return;
|
||||
}
|
||||
let v: Vec<u8> =
|
||||
std::iter::repeat_with(|| fastrand::u8(..)).take(1000).collect();
|
||||
|
||||
let mut data = Unstructured::new(&v);
|
||||
let batches = <[Batch; 5]>::arbitrary(&mut data).unwrap();
|
||||
// will be used to display the error once a thread crashes
|
||||
let dbg_input = format!("{:#?}", batches);
|
||||
|
||||
let handle = s.spawn(|| {
|
||||
let mut wtxn = index.write_txn().unwrap();
|
||||
|
||||
for batch in batches {
|
||||
let mut builder = IndexDocuments::new(
|
||||
&mut wtxn,
|
||||
&index,
|
||||
&indexer_config,
|
||||
index_documents_config.clone(),
|
||||
|_| (),
|
||||
|| false,
|
||||
)
|
||||
.unwrap();
|
||||
|
||||
for op in batch.0 {
|
||||
match op {
|
||||
Operation::AddDoc(doc) => {
|
||||
let documents =
|
||||
milli::documents::objects_from_json_value(doc.to_d());
|
||||
let documents =
|
||||
milli::documents::documents_batch_reader_from_objects(
|
||||
documents,
|
||||
);
|
||||
let (b, _added) = builder.add_documents(documents).unwrap();
|
||||
builder = b;
|
||||
}
|
||||
Operation::DeleteDoc(id) => {
|
||||
let (b, _removed) =
|
||||
builder.remove_documents(vec![id.to_s()]).unwrap();
|
||||
builder = b;
|
||||
}
|
||||
}
|
||||
}
|
||||
builder.execute().unwrap();
|
||||
|
||||
// after executing a batch we check if the database is corrupted
|
||||
let res = index.search(&wtxn).execute().unwrap();
|
||||
index.documents(&wtxn, res.documents_ids).unwrap();
|
||||
progression.fetch_add(1, Ordering::Relaxed);
|
||||
}
|
||||
wtxn.abort().unwrap();
|
||||
});
|
||||
if let err @ Err(_) = handle.join() {
|
||||
stop.store(true, Ordering::Relaxed);
|
||||
err.expect(&dbg_input);
|
||||
}
|
||||
}
|
||||
});
|
||||
});
|
||||
handles.push(handle);
|
||||
}
|
||||
|
||||
std::thread::spawn(|| {
|
||||
let mut last_value = 0;
|
||||
let start = std::time::Instant::now();
|
||||
loop {
|
||||
let total = progression.load(Ordering::Relaxed);
|
||||
let elapsed = start.elapsed().as_secs();
|
||||
if elapsed > 3600 {
|
||||
// after 1 hour, stop the fuzzer, success
|
||||
std::process::exit(0);
|
||||
}
|
||||
println!(
|
||||
"Has been running for {:?} seconds. Tested {} new values for a total of {}.",
|
||||
elapsed,
|
||||
total - last_value,
|
||||
total
|
||||
);
|
||||
last_value = total;
|
||||
std::thread::sleep(Duration::from_secs(1));
|
||||
}
|
||||
});
|
||||
|
||||
for handle in handles {
|
||||
handle.join().unwrap();
|
||||
}
|
||||
}
|
@ -1,46 +0,0 @@
|
||||
use arbitrary::Arbitrary;
|
||||
use serde_json::{json, Value};
|
||||
|
||||
#[derive(Debug, Arbitrary)]
|
||||
pub enum Document {
|
||||
One,
|
||||
Two,
|
||||
Three,
|
||||
Four,
|
||||
Five,
|
||||
Six,
|
||||
}
|
||||
|
||||
impl Document {
|
||||
pub fn to_d(&self) -> Value {
|
||||
match self {
|
||||
Document::One => json!({ "id": 0, "doggo": "bernese" }),
|
||||
Document::Two => json!({ "id": 0, "doggo": "golden" }),
|
||||
Document::Three => json!({ "id": 0, "catto": "jorts" }),
|
||||
Document::Four => json!({ "id": 1, "doggo": "bernese" }),
|
||||
Document::Five => json!({ "id": 1, "doggo": "golden" }),
|
||||
Document::Six => json!({ "id": 1, "catto": "jorts" }),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Arbitrary)]
|
||||
pub enum DocId {
|
||||
Zero,
|
||||
One,
|
||||
}
|
||||
|
||||
impl DocId {
|
||||
pub fn to_s(&self) -> String {
|
||||
match self {
|
||||
DocId::Zero => "0".to_string(),
|
||||
DocId::One => "1".to_string(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Arbitrary)]
|
||||
pub enum Operation {
|
||||
AddDoc(Document),
|
||||
DeleteDoc(DocId),
|
||||
}
|
1007 grafana-dashboards/dashboard.json (new file): file diff suppressed because it is too large
@@ -160,7 +160,7 @@ impl BatchKind {
impl BatchKind {
    /// Returns a `ControlFlow::Break` if you must stop right now.
    /// The boolean tell you if an index has been created by the batched task.
    /// To ease the writing of the code. `true` can be returned when you don't need to create an index
    /// To ease the writting of the code. `true` can be returned when you don't need to create an index
    /// but false can't be returned if you needs to create an index.
    // TODO use an AutoBatchKind as input
    pub fn new(
@@ -214,7 +214,7 @@ impl BatchKind {

    /// Returns a `ControlFlow::Break` if you must stop right now.
    /// The boolean tell you if an index has been created by the batched task.
    /// To ease the writing of the code. `true` can be returned when you don't need to create an index
    /// To ease the writting of the code. `true` can be returned when you don't need to create an index
    /// but false can't be returned if you needs to create an index.
    #[rustfmt::skip]
    fn accumulate(self, id: TaskId, kind: AutobatchKind, index_already_exists: bool, primary_key: Option<&str>) -> ControlFlow<BatchKind, BatchKind> {
@@ -321,18 +321,9 @@ impl BatchKind {
                })
            }
            (
                BatchKind::DocumentOperation { method, allow_index_creation, primary_key, mut operation_ids },
                this @ BatchKind::DocumentOperation { .. },
                K::DocumentDeletion,
            ) => {
                operation_ids.push(id);

                Continue(BatchKind::DocumentOperation {
                    method,
                    allow_index_creation,
                    primary_key,
                    operation_ids,
                })
            }
            ) => Break(this),
            // but we can't autobatch documents if it's not the same kind
            // this match branch MUST be AFTER the previous one
            (
@@ -355,35 +346,7 @@ impl BatchKind {
                deletion_ids.push(id);
                Continue(BatchKind::DocumentClear { ids: deletion_ids })
            }
            // we can autobatch the deletion and import if the index already exists
            (
                BatchKind::DocumentDeletion { mut deletion_ids },
                K::DocumentImport { method, allow_index_creation, primary_key }
            ) if index_already_exists => {
                deletion_ids.push(id);

                Continue(BatchKind::DocumentOperation {
                    method,
                    allow_index_creation,
                    primary_key,
                    operation_ids: deletion_ids,
                })
            }
            // we can autobatch the deletion and import if both can't create an index
            (
                BatchKind::DocumentDeletion { mut deletion_ids },
                K::DocumentImport { method, allow_index_creation, primary_key }
            ) if !allow_index_creation => {
                deletion_ids.push(id);

                Continue(BatchKind::DocumentOperation {
                    method,
                    allow_index_creation,
                    primary_key,
                    operation_ids: deletion_ids,
                })
            }
            // we can't autobatch a deletion and an import if the index does not exists but would be created by an addition
            // we can't autobatch a deletion and an import
            (
                this @ BatchKind::DocumentDeletion { .. },
                K::DocumentImport { .. }
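The hunks above mix both sides of the change: on one side a `DocumentDeletion` task is folded into an open `DocumentOperation` batch, on the other the batch is closed with `Break`. The following is a minimal, self-contained sketch of that `ControlFlow`-based accumulation pattern, with simplified stand-in types rather than the crate's real `BatchKind`/`AutobatchKind`:

```rust
// Sketch of the autobatcher's accumulation loop: each incoming task either grows
// the current batch (Continue) or closes it (Break) so it gets flushed as-is.
use std::ops::ControlFlow::{self, Break, Continue};

#[derive(Debug)]
enum Batch {
    DocumentOperation { operation_ids: Vec<u32> },
    DocumentDeletion { deletion_ids: Vec<u32> },
}

#[derive(Debug, Clone, Copy)]
enum Task {
    Import,
    Deletion,
}

impl Batch {
    // Returns `Continue` with the grown batch, or `Break` with the batch to flush.
    fn accumulate(self, id: u32, task: Task) -> ControlFlow<Batch, Batch> {
        match (self, task) {
            (Batch::DocumentOperation { mut operation_ids }, Task::Import) => {
                operation_ids.push(id);
                Continue(Batch::DocumentOperation { operation_ids })
            }
            // The behaviour this diff changes: one side also merges the deletion into
            // the operation batch, the other side breaks here instead.
            (this @ Batch::DocumentOperation { .. }, Task::Deletion) => Break(this),
            (Batch::DocumentDeletion { mut deletion_ids }, Task::Deletion) => {
                deletion_ids.push(id);
                Continue(Batch::DocumentDeletion { deletion_ids })
            }
            (this, _) => Break(this),
        }
    }
}

fn main() {
    let mut batch = Batch::DocumentOperation { operation_ids: vec![0] };
    for (id, task) in [(1, Task::Import), (2, Task::Deletion)] {
        match batch.accumulate(id, task) {
            Continue(next) => batch = next,
            Break(done) => {
                println!("flush {:?}", done);
                return;
            }
        }
    }
    println!("flush {:?}", batch);
}
```

Merging the deletion into `operation_ids` is what lets one batch apply an addition and a deletion in a single pass, which is exactly what the test snapshots further down in this diff exercise.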
@ -685,36 +648,36 @@ mod tests {
|
||||
debug_snapshot!(autobatch_from(false,None, [settings(false)]), @"Some((Settings { allow_index_creation: false, settings_ids: [0] }, false))");
|
||||
debug_snapshot!(autobatch_from(false,None, [settings(false), settings(false), settings(false)]), @"Some((Settings { allow_index_creation: false, settings_ids: [0, 1, 2] }, false))");
|
||||
|
||||
// We can autobatch document addition with document deletion
|
||||
debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, true, None), doc_del()]), @"Some((DocumentOperation { method: ReplaceDocuments, allow_index_creation: true, primary_key: None, operation_ids: [0, 1] }, true))");
|
||||
debug_snapshot!(autobatch_from(true, None, [doc_imp(UpdateDocuments, true, None), doc_del()]), @"Some((DocumentOperation { method: UpdateDocuments, allow_index_creation: true, primary_key: None, operation_ids: [0, 1] }, true))");
|
||||
debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, false, None), doc_del()]), @"Some((DocumentOperation { method: ReplaceDocuments, allow_index_creation: false, primary_key: None, operation_ids: [0, 1] }, false))");
|
||||
debug_snapshot!(autobatch_from(true, None, [doc_imp(UpdateDocuments, false, None), doc_del()]), @"Some((DocumentOperation { method: UpdateDocuments, allow_index_creation: false, primary_key: None, operation_ids: [0, 1] }, false))");
|
||||
debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, true, Some("catto")), doc_del()]), @r###"Some((DocumentOperation { method: ReplaceDocuments, allow_index_creation: true, primary_key: Some("catto"), operation_ids: [0, 1] }, true))"###);
|
||||
debug_snapshot!(autobatch_from(true, None, [doc_imp(UpdateDocuments, true, Some("catto")), doc_del()]), @r###"Some((DocumentOperation { method: UpdateDocuments, allow_index_creation: true, primary_key: Some("catto"), operation_ids: [0, 1] }, true))"###);
|
||||
debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, false, Some("catto")), doc_del()]), @r###"Some((DocumentOperation { method: ReplaceDocuments, allow_index_creation: false, primary_key: Some("catto"), operation_ids: [0, 1] }, false))"###);
|
||||
debug_snapshot!(autobatch_from(true, None, [doc_imp(UpdateDocuments, false, Some("catto")), doc_del()]), @r###"Some((DocumentOperation { method: UpdateDocuments, allow_index_creation: false, primary_key: Some("catto"), operation_ids: [0, 1] }, false))"###);
|
||||
debug_snapshot!(autobatch_from(false, None, [doc_imp(ReplaceDocuments, true, None), doc_del()]), @"Some((DocumentOperation { method: ReplaceDocuments, allow_index_creation: true, primary_key: None, operation_ids: [0, 1] }, true))");
|
||||
debug_snapshot!(autobatch_from(false, None, [doc_imp(UpdateDocuments, true, None), doc_del()]), @"Some((DocumentOperation { method: UpdateDocuments, allow_index_creation: true, primary_key: None, operation_ids: [0, 1] }, true))");
|
||||
debug_snapshot!(autobatch_from(false, None, [doc_imp(ReplaceDocuments, false, None), doc_del()]), @"Some((DocumentOperation { method: ReplaceDocuments, allow_index_creation: false, primary_key: None, operation_ids: [0, 1] }, false))");
|
||||
debug_snapshot!(autobatch_from(false, None, [doc_imp(UpdateDocuments, false, None), doc_del()]), @"Some((DocumentOperation { method: UpdateDocuments, allow_index_creation: false, primary_key: None, operation_ids: [0, 1] }, false))");
|
||||
debug_snapshot!(autobatch_from(false, None, [doc_imp(ReplaceDocuments, true, Some("catto")), doc_del()]), @r###"Some((DocumentOperation { method: ReplaceDocuments, allow_index_creation: true, primary_key: Some("catto"), operation_ids: [0, 1] }, true))"###);
|
||||
debug_snapshot!(autobatch_from(false, None, [doc_imp(UpdateDocuments, true, Some("catto")), doc_del()]), @r###"Some((DocumentOperation { method: UpdateDocuments, allow_index_creation: true, primary_key: Some("catto"), operation_ids: [0, 1] }, true))"###);
|
||||
debug_snapshot!(autobatch_from(false, None, [doc_imp(ReplaceDocuments, false, Some("catto")), doc_del()]), @r###"Some((DocumentOperation { method: ReplaceDocuments, allow_index_creation: false, primary_key: Some("catto"), operation_ids: [0, 1] }, false))"###);
|
||||
debug_snapshot!(autobatch_from(false, None, [doc_imp(UpdateDocuments, false, Some("catto")), doc_del()]), @r###"Some((DocumentOperation { method: UpdateDocuments, allow_index_creation: false, primary_key: Some("catto"), operation_ids: [0, 1] }, false))"###);
|
||||
// And the other way around
|
||||
debug_snapshot!(autobatch_from(true, None, [doc_del(), doc_imp(ReplaceDocuments, true, None)]), @"Some((DocumentOperation { method: ReplaceDocuments, allow_index_creation: true, primary_key: None, operation_ids: [0, 1] }, false))");
|
||||
debug_snapshot!(autobatch_from(true, None, [doc_del(), doc_imp(UpdateDocuments, true, None)]), @"Some((DocumentOperation { method: UpdateDocuments, allow_index_creation: true, primary_key: None, operation_ids: [0, 1] }, false))");
|
||||
debug_snapshot!(autobatch_from(true, None, [doc_del(), doc_imp(ReplaceDocuments, false, None)]), @"Some((DocumentOperation { method: ReplaceDocuments, allow_index_creation: false, primary_key: None, operation_ids: [0, 1] }, false))");
|
||||
debug_snapshot!(autobatch_from(true, None, [doc_del(), doc_imp(UpdateDocuments, false, None)]), @"Some((DocumentOperation { method: UpdateDocuments, allow_index_creation: false, primary_key: None, operation_ids: [0, 1] }, false))");
|
||||
debug_snapshot!(autobatch_from(true, None, [doc_del(), doc_imp(ReplaceDocuments, true, Some("catto"))]), @r###"Some((DocumentOperation { method: ReplaceDocuments, allow_index_creation: true, primary_key: Some("catto"), operation_ids: [0, 1] }, false))"###);
|
||||
debug_snapshot!(autobatch_from(true, None, [doc_del(), doc_imp(UpdateDocuments, true, Some("catto"))]), @r###"Some((DocumentOperation { method: UpdateDocuments, allow_index_creation: true, primary_key: Some("catto"), operation_ids: [0, 1] }, false))"###);
|
||||
debug_snapshot!(autobatch_from(true, None, [doc_del(), doc_imp(ReplaceDocuments, false, Some("catto"))]), @r###"Some((DocumentOperation { method: ReplaceDocuments, allow_index_creation: false, primary_key: Some("catto"), operation_ids: [0, 1] }, false))"###);
|
||||
debug_snapshot!(autobatch_from(true, None, [doc_del(), doc_imp(UpdateDocuments, false, Some("catto"))]), @r###"Some((DocumentOperation { method: UpdateDocuments, allow_index_creation: false, primary_key: Some("catto"), operation_ids: [0, 1] }, false))"###);
|
||||
debug_snapshot!(autobatch_from(false, None, [doc_del(), doc_imp(ReplaceDocuments, false, None)]), @"Some((DocumentOperation { method: ReplaceDocuments, allow_index_creation: false, primary_key: None, operation_ids: [0, 1] }, false))");
|
||||
debug_snapshot!(autobatch_from(false, None, [doc_del(), doc_imp(UpdateDocuments, false, None)]), @"Some((DocumentOperation { method: UpdateDocuments, allow_index_creation: false, primary_key: None, operation_ids: [0, 1] }, false))");
|
||||
debug_snapshot!(autobatch_from(false, None, [doc_del(), doc_imp(ReplaceDocuments, false, Some("catto"))]), @r###"Some((DocumentOperation { method: ReplaceDocuments, allow_index_creation: false, primary_key: Some("catto"), operation_ids: [0, 1] }, false))"###);
|
||||
debug_snapshot!(autobatch_from(false, None, [doc_del(), doc_imp(UpdateDocuments, false, Some("catto"))]), @r###"Some((DocumentOperation { method: UpdateDocuments, allow_index_creation: false, primary_key: Some("catto"), operation_ids: [0, 1] }, false))"###);
|
||||
// We can't autobatch document addition with document deletion
|
||||
debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, true, None), doc_del()]), @"Some((DocumentOperation { method: ReplaceDocuments, allow_index_creation: true, primary_key: None, operation_ids: [0] }, true))");
|
||||
debug_snapshot!(autobatch_from(true, None, [doc_imp(UpdateDocuments, true, None), doc_del()]), @"Some((DocumentOperation { method: UpdateDocuments, allow_index_creation: true, primary_key: None, operation_ids: [0] }, true))");
|
||||
debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, false, None), doc_del()]), @"Some((DocumentOperation { method: ReplaceDocuments, allow_index_creation: false, primary_key: None, operation_ids: [0] }, false))");
|
||||
debug_snapshot!(autobatch_from(true, None, [doc_imp(UpdateDocuments, false, None), doc_del()]), @"Some((DocumentOperation { method: UpdateDocuments, allow_index_creation: false, primary_key: None, operation_ids: [0] }, false))");
|
||||
debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, true, Some("catto")), doc_del()]), @r###"Some((DocumentOperation { method: ReplaceDocuments, allow_index_creation: true, primary_key: Some("catto"), operation_ids: [0] }, true))"###);
|
||||
debug_snapshot!(autobatch_from(true, None, [doc_imp(UpdateDocuments, true, Some("catto")), doc_del()]), @r###"Some((DocumentOperation { method: UpdateDocuments, allow_index_creation: true, primary_key: Some("catto"), operation_ids: [0] }, true))"###);
|
||||
debug_snapshot!(autobatch_from(true, None, [doc_imp(ReplaceDocuments, false, Some("catto")), doc_del()]), @r###"Some((DocumentOperation { method: ReplaceDocuments, allow_index_creation: false, primary_key: Some("catto"), operation_ids: [0] }, false))"###);
|
||||
debug_snapshot!(autobatch_from(true, None, [doc_imp(UpdateDocuments, false, Some("catto")), doc_del()]), @r###"Some((DocumentOperation { method: UpdateDocuments, allow_index_creation: false, primary_key: Some("catto"), operation_ids: [0] }, false))"###);
|
||||
debug_snapshot!(autobatch_from(false, None, [doc_imp(ReplaceDocuments, true, None), doc_del()]), @"Some((DocumentOperation { method: ReplaceDocuments, allow_index_creation: true, primary_key: None, operation_ids: [0] }, true))");
|
||||
debug_snapshot!(autobatch_from(false, None, [doc_imp(UpdateDocuments, true, None), doc_del()]), @"Some((DocumentOperation { method: UpdateDocuments, allow_index_creation: true, primary_key: None, operation_ids: [0] }, true))");
|
||||
debug_snapshot!(autobatch_from(false, None, [doc_imp(ReplaceDocuments, false, None), doc_del()]), @"Some((DocumentOperation { method: ReplaceDocuments, allow_index_creation: false, primary_key: None, operation_ids: [0] }, false))");
|
||||
debug_snapshot!(autobatch_from(false, None, [doc_imp(UpdateDocuments, false, None), doc_del()]), @"Some((DocumentOperation { method: UpdateDocuments, allow_index_creation: false, primary_key: None, operation_ids: [0] }, false))");
|
||||
debug_snapshot!(autobatch_from(false, None, [doc_imp(ReplaceDocuments, true, Some("catto")), doc_del()]), @r###"Some((DocumentOperation { method: ReplaceDocuments, allow_index_creation: true, primary_key: Some("catto"), operation_ids: [0] }, true))"###);
|
||||
debug_snapshot!(autobatch_from(false, None, [doc_imp(UpdateDocuments, true, Some("catto")), doc_del()]), @r###"Some((DocumentOperation { method: UpdateDocuments, allow_index_creation: true, primary_key: Some("catto"), operation_ids: [0] }, true))"###);
|
||||
debug_snapshot!(autobatch_from(false, None, [doc_imp(ReplaceDocuments, false, Some("catto")), doc_del()]), @r###"Some((DocumentOperation { method: ReplaceDocuments, allow_index_creation: false, primary_key: Some("catto"), operation_ids: [0] }, false))"###);
|
||||
debug_snapshot!(autobatch_from(false, None, [doc_imp(UpdateDocuments, false, Some("catto")), doc_del()]), @r###"Some((DocumentOperation { method: UpdateDocuments, allow_index_creation: false, primary_key: Some("catto"), operation_ids: [0] }, false))"###);
|
||||
// we also can't do the only way around
|
||||
debug_snapshot!(autobatch_from(true, None, [doc_del(), doc_imp(ReplaceDocuments, true, None)]), @"Some((DocumentDeletion { deletion_ids: [0] }, false))");
|
||||
debug_snapshot!(autobatch_from(true, None, [doc_del(), doc_imp(UpdateDocuments, true, None)]), @"Some((DocumentDeletion { deletion_ids: [0] }, false))");
|
||||
debug_snapshot!(autobatch_from(true, None, [doc_del(), doc_imp(ReplaceDocuments, false, None)]), @"Some((DocumentDeletion { deletion_ids: [0] }, false))");
|
||||
debug_snapshot!(autobatch_from(true, None, [doc_del(), doc_imp(UpdateDocuments, false, None)]), @"Some((DocumentDeletion { deletion_ids: [0] }, false))");
|
||||
debug_snapshot!(autobatch_from(true, None, [doc_del(), doc_imp(ReplaceDocuments, true, Some("catto"))]), @"Some((DocumentDeletion { deletion_ids: [0] }, false))");
|
||||
debug_snapshot!(autobatch_from(true, None, [doc_del(), doc_imp(UpdateDocuments, true, Some("catto"))]), @"Some((DocumentDeletion { deletion_ids: [0] }, false))");
|
||||
debug_snapshot!(autobatch_from(true, None, [doc_del(), doc_imp(ReplaceDocuments, false, Some("catto"))]), @"Some((DocumentDeletion { deletion_ids: [0] }, false))");
|
||||
debug_snapshot!(autobatch_from(true, None, [doc_del(), doc_imp(UpdateDocuments, false, Some("catto"))]), @"Some((DocumentDeletion { deletion_ids: [0] }, false))");
|
||||
debug_snapshot!(autobatch_from(false, None, [doc_del(), doc_imp(ReplaceDocuments, false, None)]), @"Some((DocumentDeletion { deletion_ids: [0] }, false))");
|
||||
debug_snapshot!(autobatch_from(false, None, [doc_del(), doc_imp(UpdateDocuments, false, None)]), @"Some((DocumentDeletion { deletion_ids: [0] }, false))");
|
||||
debug_snapshot!(autobatch_from(false, None, [doc_del(), doc_imp(ReplaceDocuments, false, Some("catto"))]), @"Some((DocumentDeletion { deletion_ids: [0] }, false))");
|
||||
debug_snapshot!(autobatch_from(false, None, [doc_del(), doc_imp(UpdateDocuments, false, Some("catto"))]), @"Some((DocumentDeletion { deletion_ids: [0] }, false))");
|
||||
}
|
||||
|
||||
#[test]
|
||||
|
@@ -998,7 +998,7 @@ impl IndexScheduler {
        }()
        .unwrap_or_default();

        // The write transaction is directly owned and committed inside.
        // The write transaction is directly owned and commited inside.
        match self.index_mapper.delete_index(wtxn, &index_uid) {
            Ok(()) => (),
            Err(Error::IndexNotFound(_)) if index_has_been_created => (),
@@ -90,17 +90,8 @@ pub enum IndexStatus {
pub struct IndexStats {
    /// Number of documents in the index.
    pub number_of_documents: u64,
    /// Size taken up by the index' DB, in bytes.
    ///
    /// This includes the size taken by both the used and free pages of the DB, and as the free pages
    /// are not returned to the disk after a deletion, this number is typically larger than
    /// `used_database_size` that only includes the size of the used pages.
    /// Size of the index' DB, in bytes.
    pub database_size: u64,
    /// Size taken by the used pages of the index' DB, in bytes.
    ///
    /// As the DB backend does not return to the disk the pages that are not currently used by the DB,
    /// this value is typically smaller than `database_size`.
    pub used_database_size: u64,
    /// Association of every field name with the number of times it occurs in the documents.
    pub field_distribution: FieldDistribution,
    /// Creation date of the index.
@@ -116,10 +107,10 @@ impl IndexStats {
    ///
    /// - rtxn: a RO transaction for the index, obtained from `Index::read_txn()`.
    pub fn new(index: &Index, rtxn: &RoTxn) -> Result<Self> {
        let database_size = index.on_disk_size()?;
        Ok(IndexStats {
            number_of_documents: index.number_of_documents(rtxn)?,
            database_size: index.on_disk_size()?,
            used_database_size: index.used_size()?,
            database_size,
            field_distribution: index.field_distribution(rtxn)?,
            created_at: index.created_at(rtxn)?,
            updated_at: index.updated_at(rtxn)?,
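The doc comments above pin down the relationship between the two sizes: `database_size` counts both the used and the free pages of the index's data file, while `used_database_size` counts only the pages currently in use, so it is always the smaller of the two. A small illustrative sketch of that accounting, in plain Rust with made-up page counts (this is not the milli/heed API):

```rust
// Toy page accounting: freed pages stay in the data file, so the on-disk size
// (`database_size`) does not shrink after deletions, but the used size does.
struct PageAccounting {
    page_size: u64,   // bytes per page
    total_pages: u64, // used pages + free pages kept in the data file
    free_pages: u64,  // pages freed by deletions but not returned to the OS
}

impl PageAccounting {
    fn database_size(&self) -> u64 {
        self.page_size * self.total_pages
    }

    fn used_database_size(&self) -> u64 {
        self.page_size * (self.total_pages - self.free_pages)
    }
}

fn main() {
    // Hypothetical numbers: 10,000 pages of 4 KiB, 2,500 of them freed.
    let stats = PageAccounting { page_size: 4096, total_pages: 10_000, free_pages: 2_500 };
    println!("database_size = {}", stats.database_size());           // 40,960,000 bytes
    println!("used_database_size = {}", stats.used_database_size()); // 30,720,000 bytes
}
```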
@@ -31,7 +31,7 @@ mod uuid_codec;
pub type Result<T> = std::result::Result<T, Error>;
pub type TaskId = u32;

use std::collections::{BTreeMap, HashMap};
use std::collections::HashMap;
use std::ops::{Bound, RangeBounds};
use std::path::{Path, PathBuf};
use std::sync::atomic::AtomicBool;
@@ -573,16 +573,10 @@ impl IndexScheduler {
        &self.index_mapper.indexer_config
    }

    /// Return the real database size (i.e.: The size **with** the free pages)
    pub fn size(&self) -> Result<u64> {
        Ok(self.env.real_disk_size()?)
    }

    /// Return the used database size (i.e.: The size **without** the free pages)
    pub fn used_size(&self) -> Result<u64> {
        Ok(self.env.non_free_pages_size()?)
    }

    /// Return the index corresponding to the name.
    ///
    /// * If the index wasn't opened before, the index will be opened.
@@ -762,38 +756,6 @@ impl IndexScheduler {
        Ok(tasks)
    }

    /// The returned structure contains:
    /// 1. The name of the property being observed can be `statuses`, `types`, or `indexes`.
    /// 2. The name of the specific data related to the property can be `enqueued` for the `statuses`, `settingsUpdate` for the `types`, or the name of the index for the `indexes`, for example.
    /// 3. The number of times the properties appeared.
    pub fn get_stats(&self) -> Result<BTreeMap<String, BTreeMap<String, u64>>> {
        let rtxn = self.read_txn()?;

        let mut res = BTreeMap::new();

        res.insert(
            "statuses".to_string(),
            enum_iterator::all::<Status>()
                .map(|s| Ok((s.to_string(), self.get_status(&rtxn, s)?.len())))
                .collect::<Result<BTreeMap<String, u64>>>()?,
        );
        res.insert(
            "types".to_string(),
            enum_iterator::all::<Kind>()
                .map(|s| Ok((s.to_string(), self.get_kind(&rtxn, s)?.len())))
                .collect::<Result<BTreeMap<String, u64>>>()?,
        );
        res.insert(
            "indexes".to_string(),
            self.index_tasks
                .iter(&rtxn)?
                .map(|res| Ok(res.map(|(name, bitmap)| (name.to_string(), bitmap.len()))?))
                .collect::<Result<BTreeMap<String, u64>>>()?,
        );

        Ok(res)
    }

    /// Return true iff there is at least one task associated with this index
    /// that is processing.
    pub fn is_index_processing(&self, index: &str) -> Result<bool> {
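The `get_stats` doc comment above describes a two-level map: the outer key is the observed property ("statuses", "types", "indexes"), the inner key is a concrete value of that property, and the number is how many tasks fall into it. A rough illustration of that shape, built with plain `BTreeMap`s and hypothetical counts rather than a call into the real scheduler:

```rust
// Shape of the statistics map returned by `get_stats`, with made-up numbers.
use std::collections::BTreeMap;

fn main() {
    let mut stats: BTreeMap<String, BTreeMap<String, u64>> = BTreeMap::new();

    stats.insert(
        "statuses".to_string(),
        BTreeMap::from([("enqueued".to_string(), 2), ("succeeded".to_string(), 14)]),
    );
    stats.insert(
        "types".to_string(),
        BTreeMap::from([("documentAdditionOrUpdate".to_string(), 9)]),
    );
    stats.insert(
        "indexes".to_string(),
        BTreeMap::from([("doggos".to_string(), 16)]),
    );

    // Printed with Debug here; an HTTP layer would serialize this map to JSON.
    println!("{:#?}", stats);
}
```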
@ -1785,7 +1747,7 @@ mod tests {
|
||||
assert_eq!(task.kind.as_kind(), k);
|
||||
}
|
||||
|
||||
snapshot!(snapshot_index_scheduler(&index_scheduler), name: "everything_is_successfully_registered");
|
||||
snapshot!(snapshot_index_scheduler(&index_scheduler), name: "everything_is_succesfully_registered");
|
||||
}
|
||||
|
||||
#[test]
|
||||
@ -2075,105 +2037,6 @@ mod tests {
|
||||
snapshot!(snapshot_index_scheduler(&index_scheduler), name: "both_task_succeeded");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn document_addition_and_document_deletion() {
|
||||
let (index_scheduler, mut handle) = IndexScheduler::test(true, vec![]);
|
||||
|
||||
let content = r#"[
|
||||
{ "id": 1, "doggo": "jean bob" },
|
||||
{ "id": 2, "catto": "jorts" },
|
||||
{ "id": 3, "doggo": "bork" }
|
||||
]"#;
|
||||
|
||||
let (uuid, mut file) = index_scheduler.create_update_file_with_uuid(0).unwrap();
|
||||
let documents_count = read_json(content.as_bytes(), file.as_file_mut()).unwrap();
|
||||
file.persist().unwrap();
|
||||
index_scheduler
|
||||
.register(KindWithContent::DocumentAdditionOrUpdate {
|
||||
index_uid: S("doggos"),
|
||||
primary_key: Some(S("id")),
|
||||
method: ReplaceDocuments,
|
||||
content_file: uuid,
|
||||
documents_count,
|
||||
allow_index_creation: true,
|
||||
})
|
||||
.unwrap();
|
||||
snapshot!(snapshot_index_scheduler(&index_scheduler), name: "registered_the_first_task");
|
||||
index_scheduler
|
||||
.register(KindWithContent::DocumentDeletion {
|
||||
index_uid: S("doggos"),
|
||||
documents_ids: vec![S("1"), S("2")],
|
||||
})
|
||||
.unwrap();
|
||||
snapshot!(snapshot_index_scheduler(&index_scheduler), name: "registered_the_second_task");
|
||||
|
||||
handle.advance_one_successful_batch(); // The addition AND deletion should've been batched together
|
||||
snapshot!(snapshot_index_scheduler(&index_scheduler), name: "after_processing_the_batch");
|
||||
|
||||
let index = index_scheduler.index("doggos").unwrap();
|
||||
let rtxn = index.read_txn().unwrap();
|
||||
let field_ids_map = index.fields_ids_map(&rtxn).unwrap();
|
||||
let field_ids = field_ids_map.ids().collect::<Vec<_>>();
|
||||
let documents = index
|
||||
.all_documents(&rtxn)
|
||||
.unwrap()
|
||||
.map(|ret| obkv_to_json(&field_ids, &field_ids_map, ret.unwrap().1).unwrap())
|
||||
.collect::<Vec<_>>();
|
||||
snapshot!(serde_json::to_string_pretty(&documents).unwrap(), name: "documents");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn document_deletion_and_document_addition() {
|
||||
let (index_scheduler, mut handle) = IndexScheduler::test(true, vec![]);
|
||||
index_scheduler
|
||||
.register(KindWithContent::DocumentDeletion {
|
||||
index_uid: S("doggos"),
|
||||
documents_ids: vec![S("1"), S("2")],
|
||||
})
|
||||
.unwrap();
|
||||
snapshot!(snapshot_index_scheduler(&index_scheduler), name: "registered_the_first_task");
|
||||
|
||||
let content = r#"[
|
||||
{ "id": 1, "doggo": "jean bob" },
|
||||
{ "id": 2, "catto": "jorts" },
|
||||
{ "id": 3, "doggo": "bork" }
|
||||
]"#;
|
||||
|
||||
let (uuid, mut file) = index_scheduler.create_update_file_with_uuid(0).unwrap();
|
||||
let documents_count = read_json(content.as_bytes(), file.as_file_mut()).unwrap();
|
||||
file.persist().unwrap();
|
||||
index_scheduler
|
||||
.register(KindWithContent::DocumentAdditionOrUpdate {
|
||||
index_uid: S("doggos"),
|
||||
primary_key: Some(S("id")),
|
||||
method: ReplaceDocuments,
|
||||
content_file: uuid,
|
||||
documents_count,
|
||||
allow_index_creation: true,
|
||||
})
|
||||
.unwrap();
|
||||
snapshot!(snapshot_index_scheduler(&index_scheduler), name: "registered_the_second_task");
|
||||
|
||||
// The deletion should have failed because it can't create an index
|
||||
handle.advance_one_failed_batch();
|
||||
snapshot!(snapshot_index_scheduler(&index_scheduler), name: "after_failing_the_deletion");
|
||||
|
||||
// The addition should works
|
||||
handle.advance_one_successful_batch();
|
||||
snapshot!(snapshot_index_scheduler(&index_scheduler), name: "after_last_successful_addition");
|
||||
|
||||
let index = index_scheduler.index("doggos").unwrap();
|
||||
let rtxn = index.read_txn().unwrap();
|
||||
let field_ids_map = index.fields_ids_map(&rtxn).unwrap();
|
||||
let field_ids = field_ids_map.ids().collect::<Vec<_>>();
|
||||
let documents = index
|
||||
.all_documents(&rtxn)
|
||||
.unwrap()
|
||||
.map(|ret| obkv_to_json(&field_ids, &field_ids_map, ret.unwrap().1).unwrap())
|
||||
.collect::<Vec<_>>();
|
||||
snapshot!(serde_json::to_string_pretty(&documents).unwrap(), name: "documents");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn do_not_batch_task_of_different_indexes() {
|
||||
let (index_scheduler, mut handle) = IndexScheduler::test(true, vec![]);
|
||||
|
@ -1,43 +0,0 @@
|
||||
---
|
||||
source: index-scheduler/src/lib.rs
|
||||
---
|
||||
### Autobatching Enabled = true
|
||||
### Processing Tasks:
|
||||
[]
|
||||
----------------------------------------------------------------------
|
||||
### All Tasks:
|
||||
0 {uid: 0, status: succeeded, details: { received_documents: 3, indexed_documents: Some(3) }, kind: DocumentAdditionOrUpdate { index_uid: "doggos", primary_key: Some("id"), method: ReplaceDocuments, content_file: 00000000-0000-0000-0000-000000000000, documents_count: 3, allow_index_creation: true }}
|
||||
1 {uid: 1, status: succeeded, details: { received_document_ids: 2, deleted_documents: Some(2) }, kind: DocumentDeletion { index_uid: "doggos", documents_ids: ["1", "2"] }}
|
||||
----------------------------------------------------------------------
|
||||
### Status:
|
||||
enqueued []
|
||||
succeeded [0,1,]
|
||||
----------------------------------------------------------------------
|
||||
### Kind:
|
||||
"documentAdditionOrUpdate" [0,]
|
||||
"documentDeletion" [1,]
|
||||
----------------------------------------------------------------------
|
||||
### Index Tasks:
|
||||
doggos [0,1,]
|
||||
----------------------------------------------------------------------
|
||||
### Index Mapper:
|
||||
doggos: { number_of_documents: 1, field_distribution: {"doggo": 1, "id": 1} }
|
||||
|
||||
----------------------------------------------------------------------
|
||||
### Canceled By:
|
||||
|
||||
----------------------------------------------------------------------
|
||||
### Enqueued At:
|
||||
[timestamp] [0,]
|
||||
[timestamp] [1,]
|
||||
----------------------------------------------------------------------
|
||||
### Started At:
|
||||
[timestamp] [0,1,]
|
||||
----------------------------------------------------------------------
|
||||
### Finished At:
|
||||
[timestamp] [0,1,]
|
||||
----------------------------------------------------------------------
|
||||
### File Store:
|
||||
|
||||
----------------------------------------------------------------------
|
||||
|
@ -1,9 +0,0 @@
|
||||
---
|
||||
source: index-scheduler/src/lib.rs
|
||||
---
|
||||
[
|
||||
{
|
||||
"id": 3,
|
||||
"doggo": "bork"
|
||||
}
|
||||
]
|
@ -1,37 +0,0 @@
|
||||
---
|
||||
source: index-scheduler/src/lib.rs
|
||||
---
|
||||
### Autobatching Enabled = true
|
||||
### Processing Tasks:
|
||||
[]
|
||||
----------------------------------------------------------------------
|
||||
### All Tasks:
|
||||
0 {uid: 0, status: enqueued, details: { received_documents: 3, indexed_documents: None }, kind: DocumentAdditionOrUpdate { index_uid: "doggos", primary_key: Some("id"), method: ReplaceDocuments, content_file: 00000000-0000-0000-0000-000000000000, documents_count: 3, allow_index_creation: true }}
|
||||
----------------------------------------------------------------------
|
||||
### Status:
|
||||
enqueued [0,]
|
||||
----------------------------------------------------------------------
|
||||
### Kind:
|
||||
"documentAdditionOrUpdate" [0,]
|
||||
----------------------------------------------------------------------
|
||||
### Index Tasks:
|
||||
doggos [0,]
|
||||
----------------------------------------------------------------------
|
||||
### Index Mapper:
|
||||
|
||||
----------------------------------------------------------------------
|
||||
### Canceled By:
|
||||
|
||||
----------------------------------------------------------------------
|
||||
### Enqueued At:
|
||||
[timestamp] [0,]
|
||||
----------------------------------------------------------------------
|
||||
### Started At:
|
||||
----------------------------------------------------------------------
|
||||
### Finished At:
|
||||
----------------------------------------------------------------------
|
||||
### File Store:
|
||||
00000000-0000-0000-0000-000000000000
|
||||
|
||||
----------------------------------------------------------------------
|
||||
|
@ -1,40 +0,0 @@
|
||||
---
|
||||
source: index-scheduler/src/lib.rs
|
||||
---
|
||||
### Autobatching Enabled = true
|
||||
### Processing Tasks:
|
||||
[]
|
||||
----------------------------------------------------------------------
|
||||
### All Tasks:
|
||||
0 {uid: 0, status: enqueued, details: { received_documents: 3, indexed_documents: None }, kind: DocumentAdditionOrUpdate { index_uid: "doggos", primary_key: Some("id"), method: ReplaceDocuments, content_file: 00000000-0000-0000-0000-000000000000, documents_count: 3, allow_index_creation: true }}
|
||||
1 {uid: 1, status: enqueued, details: { received_document_ids: 2, deleted_documents: None }, kind: DocumentDeletion { index_uid: "doggos", documents_ids: ["1", "2"] }}
|
||||
----------------------------------------------------------------------
|
||||
### Status:
|
||||
enqueued [0,1,]
|
||||
----------------------------------------------------------------------
|
||||
### Kind:
|
||||
"documentAdditionOrUpdate" [0,]
|
||||
"documentDeletion" [1,]
|
||||
----------------------------------------------------------------------
|
||||
### Index Tasks:
|
||||
doggos [0,1,]
|
||||
----------------------------------------------------------------------
|
||||
### Index Mapper:
|
||||
|
||||
----------------------------------------------------------------------
|
||||
### Canceled By:
|
||||
|
||||
----------------------------------------------------------------------
|
||||
### Enqueued At:
|
||||
[timestamp] [0,]
|
||||
[timestamp] [1,]
|
||||
----------------------------------------------------------------------
|
||||
### Started At:
|
||||
----------------------------------------------------------------------
|
||||
### Finished At:
|
||||
----------------------------------------------------------------------
|
||||
### File Store:
|
||||
00000000-0000-0000-0000-000000000000
|
||||
|
||||
----------------------------------------------------------------------
|
||||
|
@ -1,43 +0,0 @@
|
||||
---
|
||||
source: index-scheduler/src/lib.rs
|
||||
---
|
||||
### Autobatching Enabled = true
|
||||
### Processing Tasks:
|
||||
[]
|
||||
----------------------------------------------------------------------
|
||||
### All Tasks:
|
||||
0 {uid: 0, status: failed, error: ResponseError { code: 200, message: "Index `doggos` not found.", error_code: "index_not_found", error_type: "invalid_request", error_link: "https://docs.meilisearch.com/errors#index_not_found" }, details: { received_document_ids: 2, deleted_documents: Some(0) }, kind: DocumentDeletion { index_uid: "doggos", documents_ids: ["1", "2"] }}
|
||||
1 {uid: 1, status: enqueued, details: { received_documents: 3, indexed_documents: None }, kind: DocumentAdditionOrUpdate { index_uid: "doggos", primary_key: Some("id"), method: ReplaceDocuments, content_file: 00000000-0000-0000-0000-000000000000, documents_count: 3, allow_index_creation: true }}
|
||||
----------------------------------------------------------------------
|
||||
### Status:
|
||||
enqueued [1,]
|
||||
failed [0,]
|
||||
----------------------------------------------------------------------
|
||||
### Kind:
|
||||
"documentAdditionOrUpdate" [1,]
|
||||
"documentDeletion" [0,]
|
||||
----------------------------------------------------------------------
|
||||
### Index Tasks:
|
||||
doggos [0,1,]
|
||||
----------------------------------------------------------------------
|
||||
### Index Mapper:
|
||||
|
||||
----------------------------------------------------------------------
|
||||
### Canceled By:
|
||||
|
||||
----------------------------------------------------------------------
|
||||
### Enqueued At:
|
||||
[timestamp] [0,]
|
||||
[timestamp] [1,]
|
||||
----------------------------------------------------------------------
|
||||
### Started At:
|
||||
[timestamp] [0,]
|
||||
----------------------------------------------------------------------
|
||||
### Finished At:
|
||||
[timestamp] [0,]
|
||||
----------------------------------------------------------------------
|
||||
### File Store:
|
||||
00000000-0000-0000-0000-000000000000
|
||||
|
||||
----------------------------------------------------------------------
|
||||
|
@ -1,46 +0,0 @@
|
||||
---
|
||||
source: index-scheduler/src/lib.rs
|
||||
---
|
||||
### Autobatching Enabled = true
|
||||
### Processing Tasks:
|
||||
[]
|
||||
----------------------------------------------------------------------
|
||||
### All Tasks:
|
||||
0 {uid: 0, status: failed, error: ResponseError { code: 200, message: "Index `doggos` not found.", error_code: "index_not_found", error_type: "invalid_request", error_link: "https://docs.meilisearch.com/errors#index_not_found" }, details: { received_document_ids: 2, deleted_documents: Some(0) }, kind: DocumentDeletion { index_uid: "doggos", documents_ids: ["1", "2"] }}
|
||||
1 {uid: 1, status: succeeded, details: { received_documents: 3, indexed_documents: Some(3) }, kind: DocumentAdditionOrUpdate { index_uid: "doggos", primary_key: Some("id"), method: ReplaceDocuments, content_file: 00000000-0000-0000-0000-000000000000, documents_count: 3, allow_index_creation: true }}
|
||||
----------------------------------------------------------------------
|
||||
### Status:
|
||||
enqueued []
|
||||
succeeded [1,]
|
||||
failed [0,]
|
||||
----------------------------------------------------------------------
|
||||
### Kind:
|
||||
"documentAdditionOrUpdate" [1,]
|
||||
"documentDeletion" [0,]
|
||||
----------------------------------------------------------------------
|
||||
### Index Tasks:
|
||||
doggos [0,1,]
|
||||
----------------------------------------------------------------------
|
||||
### Index Mapper:
|
||||
doggos: { number_of_documents: 3, field_distribution: {"catto": 1, "doggo": 2, "id": 3} }
|
||||
|
||||
----------------------------------------------------------------------
|
||||
### Canceled By:
|
||||
|
||||
----------------------------------------------------------------------
|
||||
### Enqueued At:
|
||||
[timestamp] [0,]
|
||||
[timestamp] [1,]
|
||||
----------------------------------------------------------------------
|
||||
### Started At:
|
||||
[timestamp] [0,]
|
||||
[timestamp] [1,]
|
||||
----------------------------------------------------------------------
|
||||
### Finished At:
|
||||
[timestamp] [0,]
|
||||
[timestamp] [1,]
|
||||
----------------------------------------------------------------------
|
||||
### File Store:
|
||||
|
||||
----------------------------------------------------------------------
|
||||
|
@ -1,17 +0,0 @@
|
||||
---
|
||||
source: index-scheduler/src/lib.rs
|
||||
---
|
||||
[
|
||||
{
|
||||
"id": 1,
|
||||
"doggo": "jean bob"
|
||||
},
|
||||
{
|
||||
"id": 2,
|
||||
"catto": "jorts"
|
||||
},
|
||||
{
|
||||
"id": 3,
|
||||
"doggo": "bork"
|
||||
}
|
||||
]
|
@ -1,36 +0,0 @@
|
||||
---
|
||||
source: index-scheduler/src/lib.rs
|
||||
---
|
||||
### Autobatching Enabled = true
|
||||
### Processing Tasks:
|
||||
[]
|
||||
----------------------------------------------------------------------
|
||||
### All Tasks:
|
||||
0 {uid: 0, status: enqueued, details: { received_document_ids: 2, deleted_documents: None }, kind: DocumentDeletion { index_uid: "doggos", documents_ids: ["1", "2"] }}
|
||||
----------------------------------------------------------------------
|
||||
### Status:
|
||||
enqueued [0,]
|
||||
----------------------------------------------------------------------
|
||||
### Kind:
|
||||
"documentDeletion" [0,]
|
||||
----------------------------------------------------------------------
|
||||
### Index Tasks:
|
||||
doggos [0,]
|
||||
----------------------------------------------------------------------
|
||||
### Index Mapper:
|
||||
|
||||
----------------------------------------------------------------------
|
||||
### Canceled By:
|
||||
|
||||
----------------------------------------------------------------------
|
||||
### Enqueued At:
|
||||
[timestamp] [0,]
|
||||
----------------------------------------------------------------------
|
||||
### Started At:
|
||||
----------------------------------------------------------------------
|
||||
### Finished At:
|
||||
----------------------------------------------------------------------
|
||||
### File Store:
|
||||
|
||||
----------------------------------------------------------------------
|
||||
|
@ -1,40 +0,0 @@
|
||||
---
|
||||
source: index-scheduler/src/lib.rs
|
||||
---
|
||||
### Autobatching Enabled = true
|
||||
### Processing Tasks:
|
||||
[]
|
||||
----------------------------------------------------------------------
|
||||
### All Tasks:
|
||||
0 {uid: 0, status: enqueued, details: { received_document_ids: 2, deleted_documents: None }, kind: DocumentDeletion { index_uid: "doggos", documents_ids: ["1", "2"] }}
|
||||
1 {uid: 1, status: enqueued, details: { received_documents: 3, indexed_documents: None }, kind: DocumentAdditionOrUpdate { index_uid: "doggos", primary_key: Some("id"), method: ReplaceDocuments, content_file: 00000000-0000-0000-0000-000000000000, documents_count: 3, allow_index_creation: true }}
|
||||
----------------------------------------------------------------------
|
||||
### Status:
|
||||
enqueued [0,1,]
|
||||
----------------------------------------------------------------------
|
||||
### Kind:
|
||||
"documentAdditionOrUpdate" [1,]
|
||||
"documentDeletion" [0,]
|
||||
----------------------------------------------------------------------
|
||||
### Index Tasks:
|
||||
doggos [0,1,]
|
||||
----------------------------------------------------------------------
|
||||
### Index Mapper:
|
||||
|
||||
----------------------------------------------------------------------
|
||||
### Canceled By:
|
||||
|
||||
----------------------------------------------------------------------
|
||||
### Enqueued At:
|
||||
[timestamp] [0,]
|
||||
[timestamp] [1,]
|
||||
----------------------------------------------------------------------
|
||||
### Started At:
|
||||
----------------------------------------------------------------------
|
||||
### Finished At:
|
||||
----------------------------------------------------------------------
|
||||
### File Store:
|
||||
00000000-0000-0000-0000-000000000000
|
||||
|
||||
----------------------------------------------------------------------
|
||||
|
@ -466,7 +466,7 @@ impl IndexScheduler {
|
||||
}
|
||||
}
|
||||
Details::DocumentDeletionByFilter { deleted_documents, original_filter: _ } => {
|
||||
assert_eq!(kind.as_kind(), Kind::DocumentDeletion);
|
||||
assert_eq!(kind.as_kind(), Kind::DocumentDeletionByFilter);
|
||||
let (index_uid, _) = if let KindWithContent::DocumentDeletionByFilter {
|
||||
ref index_uid,
|
||||
ref filter_expr,
|
||||
|
@ -1,12 +0,0 @@
|
||||
[package]
|
||||
name = "index-stats"
|
||||
description = "A small program that computes internal stats of a Meilisearch index"
|
||||
version = "0.1.0"
|
||||
edition = "2021"
|
||||
publish = false
|
||||
|
||||
[dependencies]
|
||||
anyhow = "1.0.71"
|
||||
clap = { version = "4.3.5", features = ["derive"] }
|
||||
milli = { path = "../milli" }
|
||||
piechart = "1.0.0"
|
@ -1,224 +0,0 @@
|
||||
use std::cmp::Reverse;
|
||||
use std::path::PathBuf;
|
||||
|
||||
use clap::Parser;
|
||||
use milli::heed::{types::ByteSlice, EnvOpenOptions, PolyDatabase, RoTxn};
|
||||
use milli::index::db_name::*;
|
||||
use milli::index::Index;
|
||||
use piechart::{Chart, Color, Data};
|
||||
|
||||
/// A small program that computes internal stats of a Meilisearch index
|
||||
#[derive(Parser, Debug)]
|
||||
#[command(author, version, about, long_about = None)]
|
||||
struct Args {
|
||||
/// The path to the LMDB Meilisearch index database.
|
||||
path: PathBuf,
|
||||
|
||||
/// The radius of the graphs
|
||||
#[clap(long, default_value_t = 10)]
|
||||
graph_radius: u16,
|
||||
|
||||
/// The aspect ratio of the graphs
|
||||
#[clap(long, default_value_t = 6)]
|
||||
graph_aspect_ratio: u16,
|
||||
}
|
||||
|
||||
fn main() -> anyhow::Result<()> {
|
||||
let Args { path, graph_radius, graph_aspect_ratio } = Args::parse();
|
||||
let env = EnvOpenOptions::new().max_dbs(24).open(path)?;
|
||||
|
||||
// TODO not sure to keep that...
|
||||
// if removed put the pub(crate) back in the Index struct
|
||||
matches!(
|
||||
Option::<Index>::None,
|
||||
Some(Index {
|
||||
env: _,
|
||||
main: _,
|
||||
word_docids: _,
|
||||
exact_word_docids: _,
|
||||
word_prefix_docids: _,
|
||||
exact_word_prefix_docids: _,
|
||||
word_pair_proximity_docids: _,
|
||||
word_prefix_pair_proximity_docids: _,
|
||||
prefix_word_pair_proximity_docids: _,
|
||||
word_position_docids: _,
|
||||
word_fid_docids: _,
|
||||
field_id_word_count_docids: _,
|
||||
word_prefix_position_docids: _,
|
||||
word_prefix_fid_docids: _,
|
||||
script_language_docids: _,
|
||||
facet_id_exists_docids: _,
|
||||
facet_id_is_null_docids: _,
|
||||
facet_id_is_empty_docids: _,
|
||||
facet_id_f64_docids: _,
|
||||
facet_id_string_docids: _,
|
||||
field_id_docid_facet_f64s: _,
|
||||
field_id_docid_facet_strings: _,
|
||||
documents: _,
|
||||
})
|
||||
);
|
||||
|
||||
let mut wtxn = env.write_txn()?;
|
||||
let main = env.create_poly_database(&mut wtxn, Some(MAIN))?;
|
||||
let word_docids = env.create_poly_database(&mut wtxn, Some(WORD_DOCIDS))?;
|
||||
let exact_word_docids = env.create_poly_database(&mut wtxn, Some(EXACT_WORD_DOCIDS))?;
|
||||
let word_prefix_docids = env.create_poly_database(&mut wtxn, Some(WORD_PREFIX_DOCIDS))?;
|
||||
let exact_word_prefix_docids =
|
||||
env.create_poly_database(&mut wtxn, Some(EXACT_WORD_PREFIX_DOCIDS))?;
|
||||
let word_pair_proximity_docids =
|
||||
env.create_poly_database(&mut wtxn, Some(WORD_PAIR_PROXIMITY_DOCIDS))?;
|
||||
let script_language_docids =
|
||||
env.create_poly_database(&mut wtxn, Some(SCRIPT_LANGUAGE_DOCIDS))?;
|
||||
let word_prefix_pair_proximity_docids =
|
||||
env.create_poly_database(&mut wtxn, Some(WORD_PREFIX_PAIR_PROXIMITY_DOCIDS))?;
|
||||
let prefix_word_pair_proximity_docids =
|
||||
env.create_poly_database(&mut wtxn, Some(PREFIX_WORD_PAIR_PROXIMITY_DOCIDS))?;
|
||||
let word_position_docids = env.create_poly_database(&mut wtxn, Some(WORD_POSITION_DOCIDS))?;
|
||||
let word_fid_docids = env.create_poly_database(&mut wtxn, Some(WORD_FIELD_ID_DOCIDS))?;
|
||||
let field_id_word_count_docids =
|
||||
env.create_poly_database(&mut wtxn, Some(FIELD_ID_WORD_COUNT_DOCIDS))?;
|
||||
let word_prefix_position_docids =
|
||||
env.create_poly_database(&mut wtxn, Some(WORD_PREFIX_POSITION_DOCIDS))?;
|
||||
let word_prefix_fid_docids =
|
||||
env.create_poly_database(&mut wtxn, Some(WORD_PREFIX_FIELD_ID_DOCIDS))?;
|
||||
let facet_id_f64_docids = env.create_poly_database(&mut wtxn, Some(FACET_ID_F64_DOCIDS))?;
|
||||
let facet_id_string_docids =
|
||||
env.create_poly_database(&mut wtxn, Some(FACET_ID_STRING_DOCIDS))?;
|
||||
let facet_id_exists_docids =
|
||||
env.create_poly_database(&mut wtxn, Some(FACET_ID_EXISTS_DOCIDS))?;
|
||||
let facet_id_is_null_docids =
|
||||
env.create_poly_database(&mut wtxn, Some(FACET_ID_IS_NULL_DOCIDS))?;
|
||||
let facet_id_is_empty_docids =
|
||||
env.create_poly_database(&mut wtxn, Some(FACET_ID_IS_EMPTY_DOCIDS))?;
|
||||
let field_id_docid_facet_f64s =
|
||||
env.create_poly_database(&mut wtxn, Some(FIELD_ID_DOCID_FACET_F64S))?;
|
||||
let field_id_docid_facet_strings =
|
||||
env.create_poly_database(&mut wtxn, Some(FIELD_ID_DOCID_FACET_STRINGS))?;
|
||||
let documents = env.create_poly_database(&mut wtxn, Some(DOCUMENTS))?;
|
||||
wtxn.commit()?;
|
||||
|
||||
let list = [
|
||||
(main, MAIN),
|
||||
(word_docids, WORD_DOCIDS),
|
||||
(exact_word_docids, EXACT_WORD_DOCIDS),
|
||||
(word_prefix_docids, WORD_PREFIX_DOCIDS),
|
||||
(exact_word_prefix_docids, EXACT_WORD_PREFIX_DOCIDS),
|
||||
(word_pair_proximity_docids, WORD_PAIR_PROXIMITY_DOCIDS),
|
||||
(script_language_docids, SCRIPT_LANGUAGE_DOCIDS),
|
||||
(word_prefix_pair_proximity_docids, WORD_PREFIX_PAIR_PROXIMITY_DOCIDS),
|
||||
(prefix_word_pair_proximity_docids, PREFIX_WORD_PAIR_PROXIMITY_DOCIDS),
|
||||
(word_position_docids, WORD_POSITION_DOCIDS),
|
||||
(word_fid_docids, WORD_FIELD_ID_DOCIDS),
|
||||
(field_id_word_count_docids, FIELD_ID_WORD_COUNT_DOCIDS),
|
||||
(word_prefix_position_docids, WORD_PREFIX_POSITION_DOCIDS),
|
||||
(word_prefix_fid_docids, WORD_PREFIX_FIELD_ID_DOCIDS),
|
||||
(facet_id_f64_docids, FACET_ID_F64_DOCIDS),
|
||||
(facet_id_string_docids, FACET_ID_STRING_DOCIDS),
|
||||
(facet_id_exists_docids, FACET_ID_EXISTS_DOCIDS),
|
||||
(facet_id_is_null_docids, FACET_ID_IS_NULL_DOCIDS),
|
||||
(facet_id_is_empty_docids, FACET_ID_IS_EMPTY_DOCIDS),
|
||||
(field_id_docid_facet_f64s, FIELD_ID_DOCID_FACET_F64S),
|
||||
(field_id_docid_facet_strings, FIELD_ID_DOCID_FACET_STRINGS),
|
||||
(documents, DOCUMENTS),
|
||||
];
|
||||
|
||||
let rtxn = env.read_txn()?;
|
||||
let result: Result<Vec<_>, _> =
|
||||
list.into_iter().map(|(db, name)| compute_stats(&rtxn, db).map(|s| (s, name))).collect();
|
||||
let mut stats = result?;
|
||||
|
||||
println!("{:1$} Number of Entries", "", graph_radius as usize * 2);
|
||||
stats.sort_by_key(|(s, _)| Reverse(s.number_of_entries));
|
||||
let data = compute_graph_data(stats.iter().map(|(s, n)| (s.number_of_entries as f32, *n)));
|
||||
Chart::new().radius(graph_radius).aspect_ratio(graph_aspect_ratio).draw(&data);
|
||||
display_legend(&data);
|
||||
print!("\r\n");
|
||||
|
||||
println!("{:1$} Size of Entries", "", graph_radius as usize * 2);
|
||||
stats.sort_by_key(|(s, _)| Reverse(s.size_of_entries));
|
||||
let data = compute_graph_data(stats.iter().map(|(s, n)| (s.size_of_entries as f32, *n)));
|
||||
Chart::new().radius(graph_radius).aspect_ratio(graph_aspect_ratio).draw(&data);
|
||||
display_legend(&data);
|
||||
print!("\r\n");
|
||||
|
||||
println!("{:1$} Size of Data", "", graph_radius as usize * 2);
|
||||
stats.sort_by_key(|(s, _)| Reverse(s.size_of_data));
|
||||
let data = compute_graph_data(stats.iter().map(|(s, n)| (s.size_of_data as f32, *n)));
|
||||
Chart::new().radius(graph_radius).aspect_ratio(graph_aspect_ratio).draw(&data);
|
||||
display_legend(&data);
|
||||
print!("\r\n");
|
||||
|
||||
println!("{:1$} Size of Keys", "", graph_radius as usize * 2);
|
||||
stats.sort_by_key(|(s, _)| Reverse(s.size_of_keys));
|
||||
let data = compute_graph_data(stats.iter().map(|(s, n)| (s.size_of_keys as f32, *n)));
|
||||
Chart::new().radius(graph_radius).aspect_ratio(graph_aspect_ratio).draw(&data);
|
||||
display_legend(&data);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn display_legend(data: &[Data]) {
|
||||
let total: f32 = data.iter().map(|d| d.value).sum();
|
||||
for Data { label, value, color, fill } in data {
|
||||
println!(
|
||||
"{} {} {:.02}%",
|
||||
color.unwrap().paint(fill.to_string()),
|
||||
label,
|
||||
value / total * 100.0
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
fn compute_graph_data<'a>(stats: impl IntoIterator<Item = (f32, &'a str)>) -> Vec<Data> {
|
||||
let mut colors = [
|
||||
Color::Red,
|
||||
Color::Green,
|
||||
Color::Yellow,
|
||||
Color::Blue,
|
||||
Color::Purple,
|
||||
Color::Cyan,
|
||||
Color::White,
|
||||
]
|
||||
.into_iter()
|
||||
.cycle();
|
||||
|
||||
let mut characters = ['▴', '▵', '▾', '▿', '▪', '▫', '•', '◦'].into_iter().cycle();
|
||||
|
||||
stats
|
||||
.into_iter()
|
||||
.map(|(value, name)| Data {
|
||||
label: (*name).into(),
|
||||
value,
|
||||
color: Some(colors.next().unwrap().into()),
|
||||
fill: characters.next().unwrap(),
|
||||
})
|
||||
.collect()
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
pub struct Stats {
|
||||
pub number_of_entries: u64,
|
||||
pub size_of_keys: u64,
|
||||
pub size_of_data: u64,
|
||||
pub size_of_entries: u64,
|
||||
}
|
||||
|
||||
fn compute_stats(rtxn: &RoTxn, db: PolyDatabase) -> anyhow::Result<Stats> {
|
||||
let mut number_of_entries = 0;
|
||||
let mut size_of_keys = 0;
|
||||
let mut size_of_data = 0;
|
||||
|
||||
for result in db.iter::<_, ByteSlice, ByteSlice>(rtxn)? {
|
||||
let (key, data) = result?;
|
||||
number_of_entries += 1;
|
||||
size_of_keys += key.len() as u64;
|
||||
size_of_data += data.len() as u64;
|
||||
}
|
||||
|
||||
Ok(Stats {
|
||||
number_of_entries,
|
||||
size_of_keys,
|
||||
size_of_data,
|
||||
size_of_entries: size_of_keys + size_of_data,
|
||||
})
|
||||
}
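The deleted index-stats tool above is driven entirely by its Args struct: one positional LMDB path plus two optional graph flags. Below is a minimal standalone sketch of how those arguments parse, assuming clap 4 with the derive feature (as in the removed Cargo.toml); the binary name and the index path are placeholders, and the flags mirror the struct above.

use std::path::PathBuf;

use clap::Parser;

/// Mirrors the Args struct of the removed index-stats tool.
#[derive(Parser, Debug)]
struct Args {
    /// The path to the LMDB Meilisearch index database.
    path: PathBuf,

    /// The radius of the graphs.
    #[clap(long, default_value_t = 10)]
    graph_radius: u16,

    /// The aspect ratio of the graphs.
    #[clap(long, default_value_t = 6)]
    graph_aspect_ratio: u16,
}

fn main() {
    // Equivalent to running: index-stats /path/to/index --graph-radius 12
    let args = Args::parse_from(["index-stats", "/path/to/index", "--graph-radius", "12"]);
    assert_eq!(args.path, PathBuf::from("/path/to/index"));
    assert_eq!(args.graph_radius, 12);
    assert_eq!(args.graph_aspect_ratio, 6); // unset flags keep their declared defaults
}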
|
@ -45,11 +45,6 @@ impl AuthController {
|
||||
self.store.size()
|
||||
}
|
||||
|
||||
/// Return the used size of the `AuthController` database in bytes.
|
||||
pub fn used_size(&self) -> Result<u64> {
|
||||
self.store.used_size()
|
||||
}
|
||||
|
||||
pub fn create_key(&self, create_key: CreateApiKey) -> Result<Key> {
|
||||
match self.store.get_api_key(create_key.uid)? {
|
||||
Some(_) => Err(AuthControllerError::ApiKeyAlreadyExists(create_key.uid.to_string())),
|
||||
|
@ -75,11 +75,6 @@ impl HeedAuthStore {
|
||||
Ok(self.env.real_disk_size()?)
|
||||
}
|
||||
|
||||
/// Return the number of bytes actually used in the database
|
||||
pub fn used_size(&self) -> Result<u64> {
|
||||
Ok(self.env.non_free_pages_size()?)
|
||||
}
|
||||
|
||||
pub fn set_drop_on_close(&mut self, v: bool) {
|
||||
self.should_close_on_drop = v;
|
||||
}
|
||||
|
@ -175,120 +175,122 @@ macro_rules! make_error_codes {
|
||||
|
||||
// An exhaustive list of all the error codes used by meilisearch.
|
||||
make_error_codes! {
|
||||
ApiKeyAlreadyExists , InvalidRequest , CONFLICT ;
|
||||
ApiKeyNotFound , InvalidRequest , NOT_FOUND ;
|
||||
BadParameter , InvalidRequest , BAD_REQUEST;
|
||||
BadRequest , InvalidRequest , BAD_REQUEST;
|
||||
DatabaseSizeLimitReached , Internal , INTERNAL_SERVER_ERROR;
|
||||
DocumentNotFound , InvalidRequest , NOT_FOUND;
|
||||
DumpAlreadyProcessing , InvalidRequest , CONFLICT;
|
||||
DumpNotFound , InvalidRequest , NOT_FOUND;
|
||||
DumpProcessFailed , Internal , INTERNAL_SERVER_ERROR;
|
||||
DuplicateIndexFound , InvalidRequest , BAD_REQUEST;
|
||||
ImmutableApiKeyActions , InvalidRequest , BAD_REQUEST;
|
||||
ImmutableApiKeyCreatedAt , InvalidRequest , BAD_REQUEST;
|
||||
ImmutableApiKeyExpiresAt , InvalidRequest , BAD_REQUEST;
|
||||
ImmutableApiKeyIndexes , InvalidRequest , BAD_REQUEST;
|
||||
ImmutableApiKeyKey , InvalidRequest , BAD_REQUEST;
|
||||
ImmutableApiKeyUid , InvalidRequest , BAD_REQUEST;
|
||||
ImmutableApiKeyUpdatedAt , InvalidRequest , BAD_REQUEST;
|
||||
ImmutableIndexCreatedAt , InvalidRequest , BAD_REQUEST;
|
||||
ImmutableIndexUid , InvalidRequest , BAD_REQUEST;
|
||||
ImmutableIndexUpdatedAt , InvalidRequest , BAD_REQUEST;
|
||||
IndexAlreadyExists , InvalidRequest , CONFLICT ;
|
||||
IndexCreationFailed , Internal , INTERNAL_SERVER_ERROR;
|
||||
IndexNotFound , InvalidRequest , NOT_FOUND;
|
||||
IndexPrimaryKeyAlreadyExists , InvalidRequest , BAD_REQUEST ;
|
||||
IndexPrimaryKeyMultipleCandidatesFound, InvalidRequest , BAD_REQUEST;
|
||||
IndexPrimaryKeyNoCandidateFound , InvalidRequest , BAD_REQUEST ;
|
||||
Internal , Internal , INTERNAL_SERVER_ERROR ;
|
||||
InvalidApiKey , Auth , FORBIDDEN ;
|
||||
InvalidApiKeyActions , InvalidRequest , BAD_REQUEST ;
|
||||
InvalidApiKeyDescription , InvalidRequest , BAD_REQUEST ;
|
||||
InvalidApiKeyExpiresAt , InvalidRequest , BAD_REQUEST ;
|
||||
InvalidApiKeyIndexes , InvalidRequest , BAD_REQUEST ;
|
||||
InvalidApiKeyLimit , InvalidRequest , BAD_REQUEST ;
|
||||
InvalidApiKeyName , InvalidRequest , BAD_REQUEST ;
|
||||
InvalidApiKeyOffset , InvalidRequest , BAD_REQUEST ;
|
||||
InvalidApiKeyUid , InvalidRequest , BAD_REQUEST ;
|
||||
InvalidContentType , InvalidRequest , UNSUPPORTED_MEDIA_TYPE ;
|
||||
InvalidDocumentCsvDelimiter , InvalidRequest , BAD_REQUEST ;
|
||||
InvalidDocumentFields , InvalidRequest , BAD_REQUEST ;
|
||||
MissingDocumentFilter , InvalidRequest , BAD_REQUEST ;
|
||||
InvalidDocumentFilter , InvalidRequest , BAD_REQUEST ;
|
||||
InvalidDocumentGeoField , InvalidRequest , BAD_REQUEST ;
|
||||
InvalidDocumentId , InvalidRequest , BAD_REQUEST ;
|
||||
InvalidDocumentLimit , InvalidRequest , BAD_REQUEST ;
|
||||
InvalidDocumentOffset , InvalidRequest , BAD_REQUEST ;
|
||||
InvalidIndexLimit , InvalidRequest , BAD_REQUEST ;
|
||||
InvalidIndexOffset , InvalidRequest , BAD_REQUEST ;
|
||||
InvalidIndexPrimaryKey , InvalidRequest , BAD_REQUEST ;
|
||||
InvalidIndexUid , InvalidRequest , BAD_REQUEST ;
|
||||
InvalidSearchAttributesToCrop , InvalidRequest , BAD_REQUEST ;
|
||||
InvalidSearchAttributesToHighlight , InvalidRequest , BAD_REQUEST ;
|
||||
InvalidSearchAttributesToRetrieve , InvalidRequest , BAD_REQUEST ;
|
||||
InvalidSearchCropLength , InvalidRequest , BAD_REQUEST ;
|
||||
InvalidSearchCropMarker , InvalidRequest , BAD_REQUEST ;
|
||||
InvalidSearchFacets , InvalidRequest , BAD_REQUEST ;
|
||||
InvalidSearchFilter , InvalidRequest , BAD_REQUEST ;
|
||||
InvalidSearchHighlightPostTag , InvalidRequest , BAD_REQUEST ;
|
||||
InvalidSearchHighlightPreTag , InvalidRequest , BAD_REQUEST ;
|
||||
InvalidSearchHitsPerPage , InvalidRequest , BAD_REQUEST ;
|
||||
InvalidSearchLimit , InvalidRequest , BAD_REQUEST ;
|
||||
InvalidSearchMatchingStrategy , InvalidRequest , BAD_REQUEST ;
|
||||
InvalidSearchOffset , InvalidRequest , BAD_REQUEST ;
|
||||
InvalidSearchPage , InvalidRequest , BAD_REQUEST ;
|
||||
InvalidSearchQ , InvalidRequest , BAD_REQUEST ;
|
||||
InvalidSearchShowMatchesPosition , InvalidRequest , BAD_REQUEST ;
|
||||
InvalidSearchSort , InvalidRequest , BAD_REQUEST ;
|
||||
InvalidSettingsDisplayedAttributes , InvalidRequest , BAD_REQUEST ;
|
||||
InvalidSettingsDistinctAttribute , InvalidRequest , BAD_REQUEST ;
|
||||
InvalidSettingsFaceting , InvalidRequest , BAD_REQUEST ;
|
||||
InvalidSettingsFilterableAttributes , InvalidRequest , BAD_REQUEST ;
|
||||
InvalidSettingsPagination , InvalidRequest , BAD_REQUEST ;
|
||||
InvalidSettingsRankingRules , InvalidRequest , BAD_REQUEST ;
|
||||
InvalidSettingsSearchableAttributes , InvalidRequest , BAD_REQUEST ;
|
||||
InvalidSettingsSortableAttributes , InvalidRequest , BAD_REQUEST ;
|
||||
InvalidSettingsStopWords , InvalidRequest , BAD_REQUEST ;
|
||||
InvalidSettingsSynonyms , InvalidRequest , BAD_REQUEST ;
|
||||
InvalidSettingsTypoTolerance , InvalidRequest , BAD_REQUEST ;
|
||||
InvalidState , Internal , INTERNAL_SERVER_ERROR ;
|
||||
InvalidStoreFile , Internal , INTERNAL_SERVER_ERROR ;
|
||||
InvalidSwapDuplicateIndexFound , InvalidRequest , BAD_REQUEST ;
|
||||
InvalidSwapIndexes , InvalidRequest , BAD_REQUEST ;
|
||||
InvalidTaskAfterEnqueuedAt , InvalidRequest , BAD_REQUEST ;
|
||||
InvalidTaskAfterFinishedAt , InvalidRequest , BAD_REQUEST ;
|
||||
InvalidTaskAfterStartedAt , InvalidRequest , BAD_REQUEST ;
|
||||
InvalidTaskBeforeEnqueuedAt , InvalidRequest , BAD_REQUEST ;
|
||||
InvalidTaskBeforeFinishedAt , InvalidRequest , BAD_REQUEST ;
|
||||
InvalidTaskBeforeStartedAt , InvalidRequest , BAD_REQUEST ;
|
||||
InvalidTaskCanceledBy , InvalidRequest , BAD_REQUEST ;
|
||||
InvalidTaskFrom , InvalidRequest , BAD_REQUEST ;
|
||||
InvalidTaskLimit , InvalidRequest , BAD_REQUEST ;
|
||||
InvalidTaskStatuses , InvalidRequest , BAD_REQUEST ;
|
||||
InvalidTaskTypes , InvalidRequest , BAD_REQUEST ;
|
||||
InvalidTaskUids , InvalidRequest , BAD_REQUEST ;
|
||||
IoError , System , UNPROCESSABLE_ENTITY;
|
||||
MalformedPayload , InvalidRequest , BAD_REQUEST ;
|
||||
MaxFieldsLimitExceeded , InvalidRequest , BAD_REQUEST ;
|
||||
MissingApiKeyActions , InvalidRequest , BAD_REQUEST ;
|
||||
MissingApiKeyExpiresAt , InvalidRequest , BAD_REQUEST ;
|
||||
MissingApiKeyIndexes , InvalidRequest , BAD_REQUEST ;
|
||||
MissingAuthorizationHeader , Auth , UNAUTHORIZED ;
|
||||
MissingContentType , InvalidRequest , UNSUPPORTED_MEDIA_TYPE ;
|
||||
MissingDocumentId , InvalidRequest , BAD_REQUEST ;
|
||||
MissingIndexUid , InvalidRequest , BAD_REQUEST ;
|
||||
MissingMasterKey , Auth , UNAUTHORIZED ;
|
||||
MissingPayload , InvalidRequest , BAD_REQUEST ;
|
||||
MissingSwapIndexes , InvalidRequest , BAD_REQUEST ;
|
||||
MissingTaskFilters , InvalidRequest , BAD_REQUEST ;
|
||||
NoSpaceLeftOnDevice , System , UNPROCESSABLE_ENTITY;
|
||||
PayloadTooLarge , InvalidRequest , PAYLOAD_TOO_LARGE ;
|
||||
TaskNotFound , InvalidRequest , NOT_FOUND ;
|
||||
TooManyOpenFiles , System , UNPROCESSABLE_ENTITY ;
|
||||
UnretrievableDocument , Internal , BAD_REQUEST ;
|
||||
UnretrievableErrorCode , InvalidRequest , BAD_REQUEST ;
|
||||
UnsupportedMediaType , InvalidRequest , UNSUPPORTED_MEDIA_TYPE
|
||||
ApiKeyAlreadyExists , InvalidRequest , CONFLICT ;
|
||||
ApiKeyNotFound , InvalidRequest , NOT_FOUND ;
|
||||
BadParameter , InvalidRequest , BAD_REQUEST;
|
||||
BadRequest , InvalidRequest , BAD_REQUEST;
|
||||
DatabaseSizeLimitReached , Internal , INTERNAL_SERVER_ERROR;
|
||||
DocumentNotFound , InvalidRequest , NOT_FOUND;
|
||||
DumpAlreadyProcessing , InvalidRequest , CONFLICT;
|
||||
DumpNotFound , InvalidRequest , NOT_FOUND;
|
||||
DumpProcessFailed , Internal , INTERNAL_SERVER_ERROR;
|
||||
DuplicateIndexFound , InvalidRequest , BAD_REQUEST;
|
||||
ImmutableApiKeyActions , InvalidRequest , BAD_REQUEST;
|
||||
ImmutableApiKeyCreatedAt , InvalidRequest , BAD_REQUEST;
|
||||
ImmutableApiKeyExpiresAt , InvalidRequest , BAD_REQUEST;
|
||||
ImmutableApiKeyIndexes , InvalidRequest , BAD_REQUEST;
|
||||
ImmutableApiKeyKey , InvalidRequest , BAD_REQUEST;
|
||||
ImmutableApiKeyUid , InvalidRequest , BAD_REQUEST;
|
||||
ImmutableApiKeyUpdatedAt , InvalidRequest , BAD_REQUEST;
|
||||
ImmutableIndexCreatedAt , InvalidRequest , BAD_REQUEST;
|
||||
ImmutableIndexUid , InvalidRequest , BAD_REQUEST;
|
||||
ImmutableIndexUpdatedAt , InvalidRequest , BAD_REQUEST;
|
||||
IndexAlreadyExists , InvalidRequest , CONFLICT ;
|
||||
IndexCreationFailed , Internal , INTERNAL_SERVER_ERROR;
|
||||
IndexNotFound , InvalidRequest , NOT_FOUND;
|
||||
IndexPrimaryKeyAlreadyExists , InvalidRequest , BAD_REQUEST ;
|
||||
IndexPrimaryKeyMultipleCandidatesFound , InvalidRequest , BAD_REQUEST;
|
||||
IndexPrimaryKeyNoCandidateFound , InvalidRequest , BAD_REQUEST ;
|
||||
Internal , Internal , INTERNAL_SERVER_ERROR ;
|
||||
InvalidApiKey , Auth , FORBIDDEN ;
|
||||
InvalidApiKeyActions , InvalidRequest , BAD_REQUEST ;
|
||||
InvalidApiKeyDescription , InvalidRequest , BAD_REQUEST ;
|
||||
InvalidApiKeyExpiresAt , InvalidRequest , BAD_REQUEST ;
|
||||
InvalidApiKeyIndexes , InvalidRequest , BAD_REQUEST ;
|
||||
InvalidApiKeyLimit , InvalidRequest , BAD_REQUEST ;
|
||||
InvalidApiKeyName , InvalidRequest , BAD_REQUEST ;
|
||||
InvalidApiKeyOffset , InvalidRequest , BAD_REQUEST ;
|
||||
InvalidApiKeyUid , InvalidRequest , BAD_REQUEST ;
|
||||
InvalidContentType , InvalidRequest , UNSUPPORTED_MEDIA_TYPE ;
|
||||
InvalidDocumentCsvDelimiter , InvalidRequest , BAD_REQUEST ;
|
||||
InvalidDocumentFields , InvalidRequest , BAD_REQUEST ;
|
||||
MissingDocumentFilter , InvalidRequest , BAD_REQUEST ;
|
||||
InvalidDocumentFilter , InvalidRequest , BAD_REQUEST ;
|
||||
InvalidDocumentGeoField , InvalidRequest , BAD_REQUEST ;
|
||||
InvalidDocumentId , InvalidRequest , BAD_REQUEST ;
|
||||
InvalidDocumentLimit , InvalidRequest , BAD_REQUEST ;
|
||||
InvalidDocumentOffset , InvalidRequest , BAD_REQUEST ;
|
||||
InvalidIndexLimit , InvalidRequest , BAD_REQUEST ;
|
||||
InvalidIndexOffset , InvalidRequest , BAD_REQUEST ;
|
||||
InvalidIndexPrimaryKey , InvalidRequest , BAD_REQUEST ;
|
||||
InvalidIndexUid , InvalidRequest , BAD_REQUEST ;
|
||||
InvalidSearchAttributesToCrop , InvalidRequest , BAD_REQUEST ;
|
||||
InvalidSearchAttributesToHighlight , InvalidRequest , BAD_REQUEST ;
|
||||
InvalidSearchAttributesToRetrieve , InvalidRequest , BAD_REQUEST ;
|
||||
InvalidSearchCropLength , InvalidRequest , BAD_REQUEST ;
|
||||
InvalidSearchCropMarker , InvalidRequest , BAD_REQUEST ;
|
||||
InvalidSearchFacets , InvalidRequest , BAD_REQUEST ;
|
||||
InvalidSearchFilter , InvalidRequest , BAD_REQUEST ;
|
||||
InvalidSearchHighlightPostTag , InvalidRequest , BAD_REQUEST ;
|
||||
InvalidSearchHighlightPreTag , InvalidRequest , BAD_REQUEST ;
|
||||
InvalidSearchHitsPerPage , InvalidRequest , BAD_REQUEST ;
|
||||
InvalidSearchLimit , InvalidRequest , BAD_REQUEST ;
|
||||
InvalidSearchMatchingStrategy , InvalidRequest , BAD_REQUEST ;
|
||||
InvalidSearchOffset , InvalidRequest , BAD_REQUEST ;
|
||||
InvalidSearchPage , InvalidRequest , BAD_REQUEST ;
|
||||
InvalidSearchQ , InvalidRequest , BAD_REQUEST ;
|
||||
InvalidSearchShowMatchesPosition , InvalidRequest , BAD_REQUEST ;
|
||||
InvalidSearchSort , InvalidRequest , BAD_REQUEST ;
|
||||
InvalidSettingsDisplayedAttributes , InvalidRequest , BAD_REQUEST ;
|
||||
InvalidSettingsDistinctAttribute , InvalidRequest , BAD_REQUEST ;
|
||||
InvalidSettingsFilterableAttributes , InvalidRequest , BAD_REQUEST ;
|
||||
InvalidSettingsPagination , InvalidRequest , BAD_REQUEST ;
|
||||
InvalidSettingsRankingRules , InvalidRequest , BAD_REQUEST ;
|
||||
InvalidSettingsSearchableAttributes , InvalidRequest , BAD_REQUEST ;
|
||||
InvalidSettingsSortableAttributes , InvalidRequest , BAD_REQUEST ;
|
||||
InvalidSettingsStopWords , InvalidRequest , BAD_REQUEST ;
|
||||
InvalidSettingsSynonyms , InvalidRequest , BAD_REQUEST ;
|
||||
InvalidSettingsTypoTolerance , InvalidRequest , BAD_REQUEST ;
|
||||
InvalidSettingsFaceting , InvalidRequest , BAD_REQUEST ;
|
||||
InvalidSettingsFacetingMaxValuesPerFacet , InvalidRequest , BAD_REQUEST ;
|
||||
InvalidSettingsFacetingSortFacetValuesBy , InvalidRequest , BAD_REQUEST ;
|
||||
InvalidState , Internal , INTERNAL_SERVER_ERROR ;
|
||||
InvalidStoreFile , Internal , INTERNAL_SERVER_ERROR ;
|
||||
InvalidSwapDuplicateIndexFound , InvalidRequest , BAD_REQUEST ;
|
||||
InvalidSwapIndexes , InvalidRequest , BAD_REQUEST ;
|
||||
InvalidTaskAfterEnqueuedAt , InvalidRequest , BAD_REQUEST ;
|
||||
InvalidTaskAfterFinishedAt , InvalidRequest , BAD_REQUEST ;
|
||||
InvalidTaskAfterStartedAt , InvalidRequest , BAD_REQUEST ;
|
||||
InvalidTaskBeforeEnqueuedAt , InvalidRequest , BAD_REQUEST ;
|
||||
InvalidTaskBeforeFinishedAt , InvalidRequest , BAD_REQUEST ;
|
||||
InvalidTaskBeforeStartedAt , InvalidRequest , BAD_REQUEST ;
|
||||
InvalidTaskCanceledBy , InvalidRequest , BAD_REQUEST ;
|
||||
InvalidTaskFrom , InvalidRequest , BAD_REQUEST ;
|
||||
InvalidTaskLimit , InvalidRequest , BAD_REQUEST ;
|
||||
InvalidTaskStatuses , InvalidRequest , BAD_REQUEST ;
|
||||
InvalidTaskTypes , InvalidRequest , BAD_REQUEST ;
|
||||
InvalidTaskUids , InvalidRequest , BAD_REQUEST ;
|
||||
IoError , System , UNPROCESSABLE_ENTITY;
|
||||
MalformedPayload , InvalidRequest , BAD_REQUEST ;
|
||||
MaxFieldsLimitExceeded , InvalidRequest , BAD_REQUEST ;
|
||||
MissingApiKeyActions , InvalidRequest , BAD_REQUEST ;
|
||||
MissingApiKeyExpiresAt , InvalidRequest , BAD_REQUEST ;
|
||||
MissingApiKeyIndexes , InvalidRequest , BAD_REQUEST ;
|
||||
MissingAuthorizationHeader , Auth , UNAUTHORIZED ;
|
||||
MissingContentType , InvalidRequest , UNSUPPORTED_MEDIA_TYPE ;
|
||||
MissingDocumentId , InvalidRequest , BAD_REQUEST ;
|
||||
MissingIndexUid , InvalidRequest , BAD_REQUEST ;
|
||||
MissingMasterKey , Auth , UNAUTHORIZED ;
|
||||
MissingPayload , InvalidRequest , BAD_REQUEST ;
|
||||
MissingSwapIndexes , InvalidRequest , BAD_REQUEST ;
|
||||
MissingTaskFilters , InvalidRequest , BAD_REQUEST ;
|
||||
NoSpaceLeftOnDevice , System , UNPROCESSABLE_ENTITY;
|
||||
PayloadTooLarge , InvalidRequest , PAYLOAD_TOO_LARGE ;
|
||||
TaskNotFound , InvalidRequest , NOT_FOUND ;
|
||||
TooManyOpenFiles , System , UNPROCESSABLE_ENTITY ;
|
||||
UnretrievableDocument , Internal , BAD_REQUEST ;
|
||||
UnretrievableErrorCode , InvalidRequest , BAD_REQUEST ;
|
||||
UnsupportedMediaType , InvalidRequest , UNSUPPORTED_MEDIA_TYPE
|
||||
}
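Each row of the list above pairs an error code with an error type and an HTTP status, comma-separated and semicolon-terminated. Below is a simplified standalone sketch of how a make_error_codes!-style macro can turn such a list into an enum with per-variant lookups; this is not the real meilisearch-types macro, only an illustration of the shape, using two sample rows and hand-written status constants.

/// Error categories used in this sketch (a subset of the real ones).
#[derive(Debug, Clone, Copy, PartialEq)]
enum ErrorType {
    InvalidRequest,
    Auth,
    Internal,
    System,
}

// HTTP statuses referenced by name, as in the list above.
const BAD_REQUEST: u16 = 400;
const NOT_FOUND: u16 = 404;

macro_rules! make_error_codes {
    ($($code:ident , $ty:ident , $status:ident);*) => {
        #[derive(Debug, Clone, Copy, PartialEq)]
        enum Code { $($code),* }

        impl Code {
            fn error_type(&self) -> ErrorType {
                match self { $(Code::$code => ErrorType::$ty),* }
            }
            fn http_status(&self) -> u16 {
                match self { $(Code::$code => $status),* }
            }
        }
    };
}

make_error_codes! {
    BadRequest , InvalidRequest , BAD_REQUEST;
    TaskNotFound , InvalidRequest , NOT_FOUND
}

fn main() {
    assert_eq!(Code::TaskNotFound.http_status(), 404);
    assert_eq!(Code::BadRequest.error_type(), ErrorType::InvalidRequest);
}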
|
||||
|
||||
impl ErrorCode for JoinError {
|
||||
|
33  meilisearch-types/src/facet_values_sort.rs  Normal file
@ -0,0 +1,33 @@
|
||||
use deserr::Deserr;
|
||||
use milli::OrderBy;
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
#[derive(Debug, Default, Copy, Clone, PartialEq, Eq, Serialize, Deserialize, Deserr)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
#[deserr(rename_all = camelCase)]
|
||||
pub enum FacetValuesSort {
|
||||
/// Facet values are sorted in alphabetical order, ascending from A to Z.
|
||||
#[default]
|
||||
Alpha,
|
||||
/// Facet values are sorted by decreasing count.
|
||||
/// The count is the number of records containing this facet value in the results of the query.
|
||||
Count,
|
||||
}
|
||||
|
||||
impl From<FacetValuesSort> for OrderBy {
|
||||
fn from(val: FacetValuesSort) -> Self {
|
||||
match val {
|
||||
FacetValuesSort::Alpha => OrderBy::Lexicographic,
|
||||
FacetValuesSort::Count => OrderBy::Count,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl From<OrderBy> for FacetValuesSort {
|
||||
fn from(val: OrderBy) -> Self {
|
||||
match val {
|
||||
OrderBy::Lexicographic => FacetValuesSort::Alpha,
|
||||
OrderBy::Count => FacetValuesSort::Count,
|
||||
}
|
||||
}
|
||||
}
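On the wire, the rename_all = "camelCase" attributes above mean the two variants are spelled "alpha" and "count". Below is a standalone round-trip sketch of that serde-facing shape, assuming serde (with the derive feature) and serde_json are available; the Deserr derive is omitted here.

use serde::{Deserialize, Serialize};

// A standalone copy of the enum's serde-facing shape.
#[derive(Debug, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
enum FacetValuesSort {
    Alpha,
    Count,
}

fn main() {
    // Alpha serializes to "alpha", and "count" deserializes back to Count.
    assert_eq!(serde_json::to_string(&FacetValuesSort::Alpha).unwrap(), "\"alpha\"");
    assert_eq!(serde_json::from_str::<FacetValuesSort>("\"count\"").unwrap(), FacetValuesSort::Count);
}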
|
@ -2,6 +2,7 @@ pub mod compression;
|
||||
pub mod deserr;
|
||||
pub mod document_formats;
|
||||
pub mod error;
|
||||
pub mod facet_values_sort;
|
||||
pub mod index_uid;
|
||||
pub mod index_uid_pattern;
|
||||
pub mod keys;
|
||||
|
@ -14,6 +14,7 @@ use serde::{Deserialize, Serialize, Serializer};
|
||||
|
||||
use crate::deserr::DeserrJsonError;
|
||||
use crate::error::deserr_codes::*;
|
||||
use crate::facet_values_sort::FacetValuesSort;
|
||||
|
||||
/// The maximum number of results that the engine
|
||||
/// will be able to return in one search call.
|
||||
@ -97,11 +98,14 @@ pub struct TypoSettings {
|
||||
|
||||
#[derive(Debug, Clone, Default, Serialize, Deserialize, PartialEq, Eq, Deserr)]
|
||||
#[serde(deny_unknown_fields, rename_all = "camelCase")]
|
||||
#[deserr(rename_all = camelCase, deny_unknown_fields)]
|
||||
#[deserr(deny_unknown_fields, rename_all = camelCase, where_predicate = __Deserr_E: deserr::MergeWithError<DeserrJsonError<InvalidSettingsFaceting>> + deserr::MergeWithError<DeserrJsonError<InvalidSettingsFacetingMaxValuesPerFacet>> + deserr::MergeWithError<DeserrJsonError<InvalidSettingsFacetingSortFacetValuesBy>>)]
|
||||
pub struct FacetingSettings {
|
||||
#[serde(default, skip_serializing_if = "Setting::is_not_set")]
|
||||
#[deserr(default)]
|
||||
#[deserr(default, error = DeserrJsonError<InvalidSettingsFacetingMaxValuesPerFacet>)]
|
||||
pub max_values_per_facet: Setting<usize>,
|
||||
#[serde(default, skip_serializing_if = "Setting::is_not_set")]
|
||||
#[deserr(default, error = DeserrJsonError<InvalidSettingsFacetingSortFacetValuesBy>)]
|
||||
pub sort_facet_values_by: Setting<BTreeMap<String, FacetValuesSort>>,
|
||||
}
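Given the serde attributes on FacetingSettings, the new field is exposed as a camelCase key holding a facet-name-to-order map. Below is a hedged sketch of what such a faceting settings object could look like as JSON, assuming serde_json is available; the "*" wildcard key mirrors the default lookup used later in perform_search, and the "genres" facet name is purely illustrative.

use serde_json::json;

fn main() {
    // Field names follow the camelCase serde renames on FacetingSettings above;
    // the concrete values are illustrative only.
    let faceting = json!({
        "maxValuesPerFacet": 100,
        "sortFacetValuesBy": { "*": "alpha", "genres": "count" }
    });
    assert_eq!(faceting["maxValuesPerFacet"], 100);
    assert_eq!(faceting["sortFacetValuesBy"]["genres"], "count");
}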
|
||||
|
||||
#[derive(Debug, Clone, Default, Serialize, Deserialize, PartialEq, Eq, Deserr)]
|
||||
@ -398,12 +402,21 @@ pub fn apply_settings_to_builder(
|
||||
Setting::NotSet => (),
|
||||
}
|
||||
|
||||
match settings.faceting {
|
||||
Setting::Set(ref value) => match value.max_values_per_facet {
|
||||
Setting::Set(val) => builder.set_max_values_per_facet(val),
|
||||
Setting::Reset => builder.reset_max_values_per_facet(),
|
||||
Setting::NotSet => (),
|
||||
},
|
||||
match &settings.faceting {
|
||||
Setting::Set(FacetingSettings { max_values_per_facet, sort_facet_values_by }) => {
|
||||
match max_values_per_facet {
|
||||
Setting::Set(val) => builder.set_max_values_per_facet(*val),
|
||||
Setting::Reset => builder.reset_max_values_per_facet(),
|
||||
Setting::NotSet => (),
|
||||
}
|
||||
match sort_facet_values_by {
|
||||
Setting::Set(val) => builder.set_sort_facet_values_by(
|
||||
val.iter().map(|(name, order)| (name.clone(), (*order).into())).collect(),
|
||||
),
|
||||
Setting::Reset => builder.reset_sort_facet_values_by(),
|
||||
Setting::NotSet => (),
|
||||
}
|
||||
}
|
||||
Setting::Reset => builder.reset_max_values_per_facet(),
|
||||
Setting::NotSet => (),
|
||||
}
|
||||
@ -476,6 +489,13 @@ pub fn settings(
|
||||
max_values_per_facet: Setting::Set(
|
||||
index.max_values_per_facet(rtxn)?.unwrap_or(DEFAULT_VALUES_PER_FACET),
|
||||
),
|
||||
sort_facet_values_by: Setting::Set(
|
||||
index
|
||||
.sort_facet_values_by(rtxn)?
|
||||
.into_iter()
|
||||
.map(|(name, sort)| (name, sort.into()))
|
||||
.collect(),
|
||||
),
|
||||
};
|
||||
|
||||
let pagination = PaginationSettings {
|
||||
|
@ -395,6 +395,7 @@ impl std::error::Error for ParseTaskStatusError {}
|
||||
pub enum Kind {
|
||||
DocumentAdditionOrUpdate,
|
||||
DocumentDeletion,
|
||||
DocumentDeletionByFilter,
|
||||
SettingsUpdate,
|
||||
IndexCreation,
|
||||
IndexDeletion,
|
||||
@ -411,6 +412,7 @@ impl Kind {
|
||||
match self {
|
||||
Kind::DocumentAdditionOrUpdate
|
||||
| Kind::DocumentDeletion
|
||||
| Kind::DocumentDeletionByFilter
|
||||
| Kind::SettingsUpdate
|
||||
| Kind::IndexCreation
|
||||
| Kind::IndexDeletion
|
||||
@ -428,6 +430,7 @@ impl Display for Kind {
|
||||
match self {
|
||||
Kind::DocumentAdditionOrUpdate => write!(f, "documentAdditionOrUpdate"),
|
||||
Kind::DocumentDeletion => write!(f, "documentDeletion"),
|
||||
Kind::DocumentDeletionByFilter => write!(f, "documentDeletionByFilter"),
|
||||
Kind::SettingsUpdate => write!(f, "settingsUpdate"),
|
||||
Kind::IndexCreation => write!(f, "indexCreation"),
|
||||
Kind::IndexDeletion => write!(f, "indexDeletion"),
|
||||
|
@ -14,14 +14,27 @@ default-run = "meilisearch"
|
||||
|
||||
[dependencies]
|
||||
actix-cors = "0.6.4"
|
||||
actix-http = { version = "3.3.1", default-features = false, features = ["compress-brotli", "compress-gzip", "rustls"] }
|
||||
actix-web = { version = "4.3.1", default-features = false, features = ["macros", "compress-brotli", "compress-gzip", "cookies", "rustls"] }
|
||||
actix-http = { version = "3.3.1", default-features = false, features = [
|
||||
"compress-brotli",
|
||||
"compress-gzip",
|
||||
"rustls",
|
||||
] }
|
||||
actix-web = { version = "4.3.1", default-features = false, features = [
|
||||
"macros",
|
||||
"compress-brotli",
|
||||
"compress-gzip",
|
||||
"cookies",
|
||||
"rustls",
|
||||
] }
|
||||
actix-web-static-files = { git = "https://github.com/kilork/actix-web-static-files.git", rev = "2d3b6160", optional = true }
|
||||
anyhow = { version = "1.0.70", features = ["backtrace"] }
|
||||
async-stream = "0.3.5"
|
||||
async-trait = "0.1.68"
|
||||
bstr = "1.4.0"
|
||||
byte-unit = { version = "4.0.19", default-features = false, features = ["std", "serde"] }
|
||||
byte-unit = { version = "4.0.19", default-features = false, features = [
|
||||
"std",
|
||||
"serde",
|
||||
] }
|
||||
bytes = "1.4.0"
|
||||
clap = { version = "4.2.1", features = ["derive", "env"] }
|
||||
crossbeam-channel = "0.5.8"
|
||||
@ -56,7 +69,10 @@ prometheus = { version = "0.13.3", features = ["process"] }
|
||||
rand = "0.8.5"
|
||||
rayon = "1.7.0"
|
||||
regex = "1.7.3"
|
||||
reqwest = { version = "0.11.16", features = ["rustls-tls", "json"], default-features = false }
|
||||
reqwest = { version = "0.11.16", features = [
|
||||
"rustls-tls",
|
||||
"json",
|
||||
], default-features = false }
|
||||
rustls = "0.20.8"
|
||||
rustls-pemfile = "1.0.2"
|
||||
segment = { version = "0.2.2", optional = true }
|
||||
@ -70,7 +86,12 @@ sysinfo = "0.28.4"
|
||||
tar = "0.4.38"
|
||||
tempfile = "3.5.0"
|
||||
thiserror = "1.0.40"
|
||||
time = { version = "0.3.20", features = ["serde-well-known", "formatting", "parsing", "macros"] }
|
||||
time = { version = "0.3.20", features = [
|
||||
"serde-well-known",
|
||||
"formatting",
|
||||
"parsing",
|
||||
"macros",
|
||||
] }
|
||||
tokio = { version = "1.27.0", features = ["full"] }
|
||||
tokio-stream = "0.1.12"
|
||||
toml = "0.7.3"
|
||||
@ -89,7 +110,7 @@ brotli = "3.3.4"
|
||||
insta = "1.29.0"
|
||||
manifest-dir-macros = "0.1.16"
|
||||
maplit = "1.0.2"
|
||||
meili-snap = {path = "../meili-snap"}
|
||||
meili-snap = { path = "../meili-snap" }
|
||||
temp-env = "0.3.3"
|
||||
urlencoding = "2.1.2"
|
||||
yaup = "0.2.1"
|
||||
@ -98,7 +119,10 @@ yaup = "0.2.1"
|
||||
anyhow = { version = "1.0.70", optional = true }
|
||||
cargo_toml = { version = "0.15.2", optional = true }
|
||||
hex = { version = "0.4.3", optional = true }
|
||||
reqwest = { version = "0.11.16", features = ["blocking", "rustls-tls"], default-features = false, optional = true }
|
||||
reqwest = { version = "0.11.16", features = [
|
||||
"blocking",
|
||||
"rustls-tls",
|
||||
], default-features = false, optional = true }
|
||||
sha-1 = { version = "0.10.1", optional = true }
|
||||
static-files = { version = "0.2.3", optional = true }
|
||||
tempfile = { version = "3.5.0", optional = true }
|
||||
@ -108,7 +132,17 @@ zip = { version = "0.6.4", optional = true }
|
||||
[features]
|
||||
default = ["analytics", "meilisearch-types/all-tokenizations", "mini-dashboard"]
|
||||
analytics = ["segment"]
|
||||
mini-dashboard = ["actix-web-static-files", "static-files", "anyhow", "cargo_toml", "hex", "reqwest", "sha-1", "tempfile", "zip"]
|
||||
mini-dashboard = [
|
||||
"actix-web-static-files",
|
||||
"static-files",
|
||||
"anyhow",
|
||||
"cargo_toml",
|
||||
"hex",
|
||||
"reqwest",
|
||||
"sha-1",
|
||||
"tempfile",
|
||||
"zip",
|
||||
]
|
||||
chinese = ["meilisearch-types/chinese"]
|
||||
hebrew = ["meilisearch-types/hebrew"]
|
||||
japanese = ["meilisearch-types/japanese"]
|
||||
|
@ -4,32 +4,20 @@ use prometheus::{
|
||||
register_int_gauge_vec, HistogramVec, IntCounterVec, IntGauge, IntGaugeVec,
|
||||
};
|
||||
|
||||
/// Create evenly distributed buckets
|
||||
fn create_buckets() -> [f64; 29] {
|
||||
(0..10)
|
||||
.chain((10..100).step_by(10))
|
||||
.chain((100..=1000).step_by(100))
|
||||
.map(|i| i as f64 / 1000.)
|
||||
.collect::<Vec<_>>()
|
||||
.try_into()
|
||||
.unwrap()
|
||||
}
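A quick standalone check of the bucket layout produced by create_buckets() above: 10 values from 0..10, 9 from 10..100 in steps of 10, and 10 from 100..=1000 in steps of 100, i.e. 29 buckets, each divided by 1000 so the histogram spans 0.0 s up to 1.0 s.

fn main() {
    let buckets: Vec<f64> = (0..10)
        .chain((10..100).step_by(10))
        .chain((100..=1000).step_by(100))
        .map(|i| i as f64 / 1000.)
        .collect();
    assert_eq!(buckets.len(), 29); // matches the [f64; 29] returned above
    assert_eq!(buckets.first(), Some(&0.0));
    assert_eq!(buckets.last(), Some(&1.0));
}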
|
||||
const HTTP_RESPONSE_TIME_CUSTOM_BUCKETS: &[f64; 14] = &[
|
||||
0.0005, 0.0008, 0.00085, 0.0009, 0.00095, 0.001, 0.00105, 0.0011, 0.00115, 0.0012, 0.0015,
|
||||
0.002, 0.003, 1.0,
|
||||
];
|
||||
|
||||
lazy_static! {
|
||||
pub static ref HTTP_RESPONSE_TIME_CUSTOM_BUCKETS: [f64; 29] = create_buckets();
|
||||
pub static ref MEILISEARCH_HTTP_REQUESTS_TOTAL: IntCounterVec = register_int_counter_vec!(
|
||||
opts!("meilisearch_http_requests_total", "Meilisearch HTTP requests total"),
|
||||
pub static ref HTTP_REQUESTS_TOTAL: IntCounterVec = register_int_counter_vec!(
|
||||
opts!("http_requests_total", "HTTP requests total"),
|
||||
&["method", "path"]
|
||||
)
|
||||
.expect("Can't create a metric");
|
||||
pub static ref MEILISEARCH_DB_SIZE_BYTES: IntGauge =
|
||||
register_int_gauge!(opts!("meilisearch_db_size_bytes", "Meilisearch DB Size In Bytes"))
|
||||
register_int_gauge!(opts!("meilisearch_db_size_bytes", "Meilisearch Db Size In Bytes"))
|
||||
.expect("Can't create a metric");
|
||||
pub static ref MEILISEARCH_USED_DB_SIZE_BYTES: IntGauge = register_int_gauge!(opts!(
|
||||
"meilisearch_used_db_size_bytes",
|
||||
"Meilisearch Used DB Size In Bytes"
|
||||
))
|
||||
.expect("Can't create a metric");
|
||||
pub static ref MEILISEARCH_INDEX_COUNT: IntGauge =
|
||||
register_int_gauge!(opts!("meilisearch_index_count", "Meilisearch Index Count"))
|
||||
.expect("Can't create a metric");
|
||||
@ -38,16 +26,11 @@ lazy_static! {
|
||||
&["index"]
|
||||
)
|
||||
.expect("Can't create a metric");
|
||||
pub static ref MEILISEARCH_HTTP_RESPONSE_TIME_SECONDS: HistogramVec = register_histogram_vec!(
|
||||
pub static ref HTTP_RESPONSE_TIME_SECONDS: HistogramVec = register_histogram_vec!(
|
||||
"http_response_time_seconds",
|
||||
"HTTP response times",
|
||||
&["method", "path"],
|
||||
HTTP_RESPONSE_TIME_CUSTOM_BUCKETS.to_vec()
|
||||
)
|
||||
.expect("Can't create a metric");
|
||||
pub static ref MEILISEARCH_NB_TASKS: IntGaugeVec = register_int_gauge_vec!(
|
||||
opts!("meilisearch_nb_tasks", "Meilisearch Number of tasks"),
|
||||
&["kind", "value"]
|
||||
)
|
||||
.expect("Can't create a metric");
|
||||
}
|
||||
|
@ -52,11 +52,11 @@ where
|
||||
if is_registered_resource {
|
||||
let request_method = req.method().to_string();
|
||||
histogram_timer = Some(
|
||||
crate::metrics::MEILISEARCH_HTTP_RESPONSE_TIME_SECONDS
|
||||
crate::metrics::HTTP_RESPONSE_TIME_SECONDS
|
||||
.with_label_values(&[&request_method, request_path])
|
||||
.start_timer(),
|
||||
);
|
||||
crate::metrics::MEILISEARCH_HTTP_REQUESTS_TOTAL
|
||||
crate::metrics::HTTP_REQUESTS_TOTAL
|
||||
.with_label_values(&[&request_method, request_path])
|
||||
.inc();
|
||||
}
|
||||
|
@ -407,6 +407,7 @@ make_setting_route!(
|
||||
json!({
|
||||
"faceting": {
|
||||
"max_values_per_facet": setting.as_ref().and_then(|s| s.max_values_per_facet.set()),
|
||||
"sort_facet_values_by": setting.as_ref().and_then(|s| s.sort_facet_values_by.clone().set()),
|
||||
},
|
||||
}),
|
||||
Some(req),
|
||||
@ -545,6 +546,10 @@ pub async fn update_all(
|
||||
.as_ref()
|
||||
.set()
|
||||
.and_then(|s| s.max_values_per_facet.as_ref().set()),
|
||||
"sort_facet_values_by": new_settings.faceting
|
||||
.as_ref()
|
||||
.set()
|
||||
.and_then(|s| s.sort_facet_values_by.as_ref().set()),
|
||||
},
|
||||
"pagination": {
|
||||
"max_total_hits": new_settings.pagination
|
||||
|
@ -17,7 +17,7 @@ pub fn configure(config: &mut web::ServiceConfig) {
|
||||
|
||||
pub async fn get_metrics(
|
||||
index_scheduler: GuardedData<ActionPolicy<{ actions::METRICS_GET }>, Data<IndexScheduler>>,
|
||||
auth_controller: Data<AuthController>,
|
||||
auth_controller: GuardedData<ActionPolicy<{ actions::METRICS_GET }>, Data<AuthController>>,
|
||||
) -> Result<HttpResponse, ResponseError> {
|
||||
let auth_filters = index_scheduler.filters();
|
||||
if !auth_filters.all_indexes_authorized() {
|
||||
@ -28,10 +28,10 @@ pub async fn get_metrics(
|
||||
return Err(error);
|
||||
}
|
||||
|
||||
let response = create_all_stats((*index_scheduler).clone(), auth_controller, auth_filters)?;
|
||||
let response =
|
||||
create_all_stats((*index_scheduler).clone(), (*auth_controller).clone(), auth_filters)?;
|
||||
|
||||
crate::metrics::MEILISEARCH_DB_SIZE_BYTES.set(response.database_size as i64);
|
||||
crate::metrics::MEILISEARCH_USED_DB_SIZE_BYTES.set(response.used_database_size as i64);
|
||||
crate::metrics::MEILISEARCH_INDEX_COUNT.set(response.indexes.len() as i64);
|
||||
|
||||
for (index, value) in response.indexes.iter() {
|
||||
@ -40,14 +40,6 @@ pub async fn get_metrics(
|
||||
.set(value.number_of_documents as i64);
|
||||
}
|
||||
|
||||
for (kind, value) in index_scheduler.get_stats()? {
|
||||
for (value, count) in value {
|
||||
crate::metrics::MEILISEARCH_NB_TASKS
|
||||
.with_label_values(&[&kind, &value])
|
||||
.set(count as i64);
|
||||
}
|
||||
}
|
||||
|
||||
let encoder = TextEncoder::new();
|
||||
let mut buffer = vec![];
|
||||
encoder.encode(&prometheus::gather(), &mut buffer).expect("Failed to encode metrics");
|
||||
|
@ -231,8 +231,6 @@ pub async fn running() -> HttpResponse {
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct Stats {
|
||||
pub database_size: u64,
|
||||
#[serde(skip)]
|
||||
pub used_database_size: u64,
|
||||
#[serde(serialize_with = "time::serde::rfc3339::option::serialize")]
|
||||
pub last_update: Option<OffsetDateTime>,
|
||||
pub indexes: BTreeMap<String, indexes::IndexStats>,
|
||||
@ -261,7 +259,6 @@ pub fn create_all_stats(
    let mut last_task: Option<OffsetDateTime> = None;
    let mut indexes = BTreeMap::new();
    let mut database_size = 0;
    let mut used_database_size = 0;

    for index_uid in index_scheduler.index_names()? {
        // Accumulate the size of all indexes, even unauthorized ones, so
@ -269,7 +266,6 @@ pub fn create_all_stats(
        // See <https://github.com/meilisearch/meilisearch/pull/3541#discussion_r1126747643> for context.
        let stats = index_scheduler.index_stats(&index_uid)?;
        database_size += stats.inner_stats.database_size;
        used_database_size += stats.inner_stats.used_database_size;

        if !filters.is_index_authorized(&index_uid) {
            continue;
@ -282,14 +278,10 @@ pub fn create_all_stats(
    }

    database_size += index_scheduler.size()?;
    used_database_size += index_scheduler.used_size()?;
    database_size += auth_controller.size()?;
    used_database_size += auth_controller.used_size()?;
    let update_file_size = index_scheduler.compute_update_file_size()?;
    database_size += update_file_size;
    used_database_size += update_file_size;
    database_size += index_scheduler.compute_update_file_size()?;

    let stats = Stats { database_size, used_database_size, last_update: last_task, indexes };
    let stats = Stats { database_size, last_update: last_task, indexes };
    Ok(stats)
}
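The hunk above keeps summing the sizes of every index while only reporting the authorized ones back to the caller. A minimal sketch of that accumulation pattern, with a hypothetical `IndexStats` and a stand-in authorization check in place of Meilisearch's real types:

```rust
use std::collections::BTreeMap;

/// Hypothetical per-index stats; the real `IndexStats` carries more fields.
struct IndexStats {
    database_size: u64,
    used_database_size: u64,
}

fn main() {
    let indexes: BTreeMap<&str, IndexStats> = BTreeMap::from([
        ("movies", IndexStats { database_size: 4096, used_database_size: 2048 }),
        ("private", IndexStats { database_size: 1024, used_database_size: 512 }),
    ]);
    // Stand-in for `filters.is_index_authorized(&index_uid)`.
    let is_authorized = |uid: &str| uid != "private";

    let mut database_size = 0u64;
    let mut used_database_size = 0u64;
    let mut visible = BTreeMap::new();
    for (&uid, stats) in &indexes {
        // Sizes are counted for every index, even unauthorized ones...
        database_size += stats.database_size;
        used_database_size += stats.used_database_size;
        // ...but only authorized indexes end up in the reported map.
        if is_authorized(uid) {
            visible.insert(uid.to_string(), stats.database_size);
        }
    }
    println!("{database_size} {used_database_size} {visible:?}");
}
```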
@ -730,7 +730,7 @@ mod tests {
|
||||
let err = deserr_query_params::<TaskDeletionOrCancelationQuery>(params).unwrap_err();
|
||||
snapshot!(meili_snap::json_string!(err), @r###"
|
||||
{
|
||||
"message": "Invalid value in parameter `types`: `createIndex` is not a valid task type. Available types are `documentAdditionOrUpdate`, `documentDeletion`, `settingsUpdate`, `indexCreation`, `indexDeletion`, `indexUpdate`, `indexSwap`, `taskCancelation`, `taskDeletion`, `dumpCreation`, `snapshotCreation`.",
|
||||
"message": "Invalid value in parameter `types`: `createIndex` is not a valid task type. Available types are `documentAdditionOrUpdate`, `documentDeletion`, `documentDeletionByFilter`, `settingsUpdate`, `indexCreation`, `indexDeletion`, `indexUpdate`, `indexSwap`, `taskCancelation`, `taskDeletion`, `dumpCreation`, `snapshotCreation`.",
|
||||
"code": "invalid_task_types",
|
||||
"type": "invalid_request",
|
||||
"link": "https://docs.meilisearch.com/errors#invalid_task_types"
@ -5,6 +5,7 @@ use std::time::Instant;

use deserr::Deserr;
use either::Either;
use indexmap::IndexMap;
use meilisearch_auth::IndexSearchRules;
use meilisearch_types::deserr::DeserrJsonError;
use meilisearch_types::error::deserr_codes::*;
@ -213,7 +214,7 @@ pub struct SearchResult {
    #[serde(flatten)]
    pub hits_info: HitsInfo,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub facet_distribution: Option<BTreeMap<String, BTreeMap<String, u64>>>,
    pub facet_distribution: Option<BTreeMap<String, IndexMap<String, u64>>>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub facet_stats: Option<BTreeMap<String, FacetStats>>,
}
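The inner map of `facet_distribution` changes from `BTreeMap` to `IndexMap`; the point is order preservation, since with count-based sorting the serialization order carries meaning. A small illustration of the difference, using the indexmap crate that the diff adds as a dependency:

```rust
use std::collections::BTreeMap;

use indexmap::IndexMap;

fn main() {
    // A count-ordered distribution: most frequent value first.
    let pairs = [("red", 7u64), ("blue", 3), ("aqua", 1)];

    let index_map: IndexMap<_, _> = pairs.into_iter().collect();
    let btree_map: BTreeMap<_, _> = pairs.into_iter().collect();

    // IndexMap keeps insertion order, so a `count` ordering survives serialization...
    assert_eq!(index_map.keys().copied().collect::<Vec<_>>(), ["red", "blue", "aqua"]);
    // ...while BTreeMap silently re-sorts the keys lexicographically.
    assert_eq!(btree_map.keys().copied().collect::<Vec<_>>(), ["aqua", "blue", "red"]);
}
```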
@ -448,10 +449,30 @@ pub fn perform_search(
            .unwrap_or(DEFAULT_VALUES_PER_FACET);
        facet_distribution.max_values_per_facet(max_values_by_facet);

        let sort_facet_values_by =
            index.sort_facet_values_by(&rtxn).map_err(milli::Error::from)?;
        let default_sort_facet_values_by =
            sort_facet_values_by.get("*").copied().unwrap_or_default();

        if fields.iter().all(|f| f != "*") {
            let fields: Vec<_> = fields
                .into_iter()
                .map(|n| {
                    (
                        n,
                        sort_facet_values_by
                            .get(n)
                            .copied()
                            .unwrap_or(default_sort_facet_values_by),
                    )
                })
                .collect();
            facet_distribution.facets(fields);
        }
        let distribution = facet_distribution.candidates(candidates).execute()?;
        let distribution = facet_distribution
            .candidates(candidates)
            .default_order_by(default_sort_facet_values_by)
            .execute()?;
        let stats = facet_distribution.compute_stats()?;
        (Some(distribution), Some(stats))
    }
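The hunk above pairs every requested facet with its configured order, falling back to the `*` wildcard entry. A self-contained sketch of just that lookup pattern, with a stand-in `OrderBy` and a plain `HashMap` in place of the value returned by `index.sort_facet_values_by(&rtxn)`:

```rust
use std::collections::HashMap;

#[derive(Debug, Default, Clone, Copy, PartialEq, Eq)]
enum OrderBy {
    #[default]
    Lexicographic,
    Count,
}

fn main() {
    // Stand-in for the per-facet settings map read from the index.
    let mut settings = HashMap::new();
    settings.insert("*".to_string(), OrderBy::Count);
    settings.insert("colour".to_string(), OrderBy::Lexicographic);

    let default = settings.get("*").copied().unwrap_or_default();
    let requested = ["colour", "brand"];

    // Pair every requested facet with its order, falling back to the `*` default.
    let fields: Vec<(&str, OrderBy)> = requested
        .into_iter()
        .map(|name| (name, settings.get(name).copied().unwrap_or(default)))
        .collect();

    assert_eq!(fields, vec![("colour", OrderBy::Lexicographic), ("brand", OrderBy::Count)]);
}
```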
|
@ -97,7 +97,7 @@ async fn task_bad_types() {
|
||||
snapshot!(code, @"400 Bad Request");
|
||||
snapshot!(json_string!(response), @r###"
|
||||
{
|
||||
"message": "Invalid value in parameter `types`: `doggo` is not a valid task type. Available types are `documentAdditionOrUpdate`, `documentDeletion`, `settingsUpdate`, `indexCreation`, `indexDeletion`, `indexUpdate`, `indexSwap`, `taskCancelation`, `taskDeletion`, `dumpCreation`, `snapshotCreation`.",
|
||||
"message": "Invalid value in parameter `types`: `doggo` is not a valid task type. Available types are `documentAdditionOrUpdate`, `documentDeletion`, `documentDeletionByFilter`, `settingsUpdate`, `indexCreation`, `indexDeletion`, `indexUpdate`, `indexSwap`, `taskCancelation`, `taskDeletion`, `dumpCreation`, `snapshotCreation`.",
|
||||
"code": "invalid_task_types",
|
||||
"type": "invalid_request",
|
||||
"link": "https://docs.meilisearch.com/errors#invalid_task_types"
|
||||
@ -108,7 +108,7 @@ async fn task_bad_types() {
|
||||
snapshot!(code, @"400 Bad Request");
|
||||
snapshot!(json_string!(response), @r###"
|
||||
{
|
||||
"message": "Invalid value in parameter `types`: `doggo` is not a valid task type. Available types are `documentAdditionOrUpdate`, `documentDeletion`, `settingsUpdate`, `indexCreation`, `indexDeletion`, `indexUpdate`, `indexSwap`, `taskCancelation`, `taskDeletion`, `dumpCreation`, `snapshotCreation`.",
|
||||
"message": "Invalid value in parameter `types`: `doggo` is not a valid task type. Available types are `documentAdditionOrUpdate`, `documentDeletion`, `documentDeletionByFilter`, `settingsUpdate`, `indexCreation`, `indexDeletion`, `indexUpdate`, `indexSwap`, `taskCancelation`, `taskDeletion`, `dumpCreation`, `snapshotCreation`.",
|
||||
"code": "invalid_task_types",
|
||||
"type": "invalid_request",
|
||||
"link": "https://docs.meilisearch.com/errors#invalid_task_types"
|
||||
@ -119,7 +119,7 @@ async fn task_bad_types() {
|
||||
snapshot!(code, @"400 Bad Request");
|
||||
snapshot!(json_string!(response), @r###"
|
||||
{
|
||||
"message": "Invalid value in parameter `types`: `doggo` is not a valid task type. Available types are `documentAdditionOrUpdate`, `documentDeletion`, `settingsUpdate`, `indexCreation`, `indexDeletion`, `indexUpdate`, `indexSwap`, `taskCancelation`, `taskDeletion`, `dumpCreation`, `snapshotCreation`.",
|
||||
"message": "Invalid value in parameter `types`: `doggo` is not a valid task type. Available types are `documentAdditionOrUpdate`, `documentDeletion`, `documentDeletionByFilter`, `settingsUpdate`, `indexCreation`, `indexDeletion`, `indexUpdate`, `indexSwap`, `taskCancelation`, `taskDeletion`, `dumpCreation`, `snapshotCreation`.",
|
||||
"code": "invalid_task_types",
|
||||
"type": "invalid_request",
|
||||
"link": "https://docs.meilisearch.com/errors#invalid_task_types"
|
||||
|
@ -32,6 +32,7 @@ heed = { git = "https://github.com/meilisearch/heed", tag = "v0.12.6", default-f
|
||||
"lmdb",
|
||||
"sync-read-txn",
|
||||
] }
|
||||
indexmap = { version = "1.9.3", features = ["serde"] }
|
||||
json-depth-checker = { path = "../json-depth-checker" }
|
||||
levenshtein_automata = { version = "0.2.1", features = ["fst_automaton"] }
|
||||
memmap2 = "0.5.10"
|
||||
@ -75,6 +76,9 @@ maplit = "1.0.2"
|
||||
md5 = "0.7.0"
|
||||
rand = { version = "0.8.5", features = ["small_rng"] }
|
||||
|
||||
[target.'cfg(fuzzing)'.dev-dependencies]
|
||||
fuzzcheck = "0.12.1"
|
||||
|
||||
[features]
|
||||
all-tokenizations = ["charabia/default"]
|
||||
|
||||
|
@ -111,6 +111,7 @@ pub enum Error {
|
||||
Io(#[from] io::Error),
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
pub fn objects_from_json_value(json: serde_json::Value) -> Vec<crate::Object> {
|
||||
let documents = match json {
|
||||
object @ serde_json::Value::Object(_) => vec![object],
|
||||
@ -140,6 +141,7 @@ macro_rules! documents {
|
||||
}};
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
pub fn documents_batch_reader_from_objects(
|
||||
objects: impl IntoIterator<Item = Object>,
|
||||
) -> DocumentsBatchReader<std::io::Cursor<Vec<u8>>> {
|
||||
|
@ -106,30 +106,22 @@ impl<'a> ExternalDocumentsIds<'a> {
|
||||
map
|
||||
}
|
||||
|
||||
/// Return an fst of the combined hard and soft deleted ID.
|
||||
pub fn to_fst<'b>(&'b self) -> fst::Result<Cow<'b, fst::Map<Cow<'a, [u8]>>>> {
|
||||
if self.soft.is_empty() {
|
||||
return Ok(Cow::Borrowed(&self.hard));
|
||||
}
|
||||
let union_op = self.hard.op().add(&self.soft).r#union();
|
||||
|
||||
let mut iter = union_op.into_stream();
|
||||
let mut new_hard_builder = fst::MapBuilder::memory();
|
||||
while let Some((external_id, marked_docids)) = iter.next() {
|
||||
let value = indexed_last_value(marked_docids).unwrap();
|
||||
if value != DELETED_ID {
|
||||
new_hard_builder.insert(external_id, value)?;
|
||||
}
|
||||
}
|
||||
|
||||
drop(iter);
|
||||
|
||||
Ok(Cow::Owned(new_hard_builder.into_map().map_data(Cow::Owned)?))
|
||||
}
|
||||
|
||||
fn merge_soft_into_hard(&mut self) -> fst::Result<()> {
|
||||
if self.soft.len() >= self.hard.len() / 2 {
|
||||
self.hard = self.to_fst()?.into_owned();
|
||||
let union_op = self.hard.op().add(&self.soft).r#union();
|
||||
|
||||
let mut iter = union_op.into_stream();
|
||||
let mut new_hard_builder = fst::MapBuilder::memory();
|
||||
while let Some((external_id, marked_docids)) = iter.next() {
|
||||
let value = indexed_last_value(marked_docids).unwrap();
|
||||
if value != DELETED_ID {
|
||||
new_hard_builder.insert(external_id, value)?;
|
||||
}
|
||||
}
|
||||
|
||||
drop(iter);
|
||||
|
||||
self.hard = new_hard_builder.into_map().map_data(Cow::Owned)?;
|
||||
self.soft = fst::Map::default().map_data(Cow::Owned)?;
|
||||
}
|
||||
|
||||
|
@ -49,7 +49,7 @@ impl CboRoaringBitmapCodec {
        } else {
            // Otherwise, it means we used the classic RoaringBitmapCodec and
            // that the header takes threshold integers.
            RoaringBitmap::deserialize_unchecked_from(bytes)
            RoaringBitmap::deserialize_from(bytes)
        }
    }
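This prototype swaps the unchecked deserializer for the validating one. A minimal round-trip with the roaring crate (assuming a recent version that exposes both methods) showing the call being switched to:

```rust
use roaring::RoaringBitmap;

fn main() -> std::io::Result<()> {
    let bitmap: RoaringBitmap = (0..1_000).collect();

    let mut bytes = Vec::new();
    bitmap.serialize_into(&mut bytes)?;

    // `deserialize_from` validates the buffer; the `_unchecked_` variant skips
    // those checks for speed and is only suitable for trusted, well-formed input.
    let decoded = RoaringBitmap::deserialize_from(&bytes[..])?;
    assert_eq!(decoded, bitmap);
    Ok(())
}
```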
@ -69,7 +69,7 @@ impl CboRoaringBitmapCodec {
|
||||
vec.push(integer);
|
||||
}
|
||||
} else {
|
||||
roaring |= RoaringBitmap::deserialize_unchecked_from(bytes.as_ref())?;
|
||||
roaring |= RoaringBitmap::deserialize_from(bytes.as_ref())?;
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -8,7 +8,7 @@ impl heed::BytesDecode<'_> for RoaringBitmapCodec {
|
||||
type DItem = RoaringBitmap;
|
||||
|
||||
fn bytes_decode(bytes: &[u8]) -> Option<Self::DItem> {
|
||||
RoaringBitmap::deserialize_unchecked_from(bytes).ok()
|
||||
RoaringBitmap::deserialize_from(bytes).ok()
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -21,9 +21,10 @@ use crate::heed_codec::facet::{
|
||||
};
|
||||
use crate::heed_codec::{ScriptLanguageCodec, StrBEU16Codec, StrRefCodec};
|
||||
use crate::{
|
||||
default_criteria, CboRoaringBitmapCodec, Criterion, DocumentId, ExternalDocumentsIds,
|
||||
FacetDistribution, FieldDistribution, FieldId, FieldIdWordCountCodec, GeoPoint, ObkvCodec,
|
||||
Result, RoaringBitmapCodec, RoaringBitmapLenCodec, Search, U8StrStrCodec, BEU16, BEU32,
|
||||
default_criteria, BEU32StrCodec, BoRoaringBitmapCodec, CboRoaringBitmapCodec, Criterion,
|
||||
DocumentId, ExternalDocumentsIds, FacetDistribution, FieldDistribution, FieldId,
|
||||
FieldIdWordCountCodec, GeoPoint, ObkvCodec, OrderBy, Result, RoaringBitmapCodec,
|
||||
RoaringBitmapLenCodec, Search, U8StrStrCodec, BEU16, BEU32,
|
||||
};
|
||||
|
||||
pub const DEFAULT_MIN_WORD_LEN_ONE_TYPO: u8 = 5;
|
||||
@ -61,6 +62,7 @@ pub mod main_key {
|
||||
pub const EXACT_WORDS: &str = "exact-words";
|
||||
pub const EXACT_ATTRIBUTES: &str = "exact-attributes";
|
||||
pub const MAX_VALUES_PER_FACET: &str = "max-values-per-facet";
|
||||
pub const SORT_FACET_VALUES_BY: &str = "sort-facet-values-by";
|
||||
pub const PAGINATION_MAX_TOTAL_HITS: &str = "pagination-max-total-hits";
|
||||
}
|
||||
|
||||
@ -93,10 +95,10 @@ pub mod db_name {
|
||||
#[derive(Clone)]
|
||||
pub struct Index {
|
||||
/// The LMDB environment which this index is associated with.
|
||||
pub env: heed::Env,
|
||||
pub(crate) env: heed::Env,
|
||||
|
||||
/// Contains many different types (e.g. the fields ids map).
|
||||
pub main: PolyDatabase,
|
||||
pub(crate) main: PolyDatabase,
|
||||
|
||||
/// A word and all the documents ids containing the word.
|
||||
pub word_docids: Database<Str, RoaringBitmapCodec>,
|
||||
@ -110,6 +112,9 @@ pub struct Index {
|
||||
/// A prefix of word and all the documents ids containing this prefix, from attributes for which typos are not allowed.
|
||||
pub exact_word_prefix_docids: Database<Str, RoaringBitmapCodec>,
|
||||
|
||||
/// Maps a word and a document id (u32) to all the positions where the given word appears.
|
||||
pub docid_word_positions: Database<BEU32StrCodec, BoRoaringBitmapCodec>,
|
||||
|
||||
/// Maps the proximity between a pair of words with all the docids where this relation appears.
|
||||
pub word_pair_proximity_docids: Database<U8StrStrCodec, CboRoaringBitmapCodec>,
|
||||
/// Maps the proximity between a pair of word and prefix with all the docids where this relation appears.
|
||||
@ -150,7 +155,7 @@ pub struct Index {
|
||||
pub field_id_docid_facet_strings: Database<FieldDocIdFacetStringCodec, Str>,
|
||||
|
||||
/// Maps the document id to the document as an obkv store.
|
||||
pub documents: Database<OwnedType<BEU32>, ObkvCodec>,
|
||||
pub(crate) documents: Database<OwnedType<BEU32>, ObkvCodec>,
|
||||
}
|
||||
|
||||
impl Index {
|
||||
@ -173,6 +178,7 @@ impl Index {
|
||||
let word_prefix_docids = env.create_database(&mut wtxn, Some(WORD_PREFIX_DOCIDS))?;
|
||||
let exact_word_prefix_docids =
|
||||
env.create_database(&mut wtxn, Some(EXACT_WORD_PREFIX_DOCIDS))?;
|
||||
let docid_word_positions = env.create_database(&mut wtxn, Some(DOCID_WORD_POSITIONS))?;
|
||||
let word_pair_proximity_docids =
|
||||
env.create_database(&mut wtxn, Some(WORD_PAIR_PROXIMITY_DOCIDS))?;
|
||||
let script_language_docids =
|
||||
@ -215,6 +221,7 @@ impl Index {
|
||||
exact_word_docids,
|
||||
word_prefix_docids,
|
||||
exact_word_prefix_docids,
|
||||
docid_word_positions,
|
||||
word_pair_proximity_docids,
|
||||
script_language_docids,
|
||||
word_prefix_pair_proximity_docids,
|
||||
@ -1228,6 +1235,31 @@ impl Index {
        self.main.delete::<_, Str>(txn, main_key::MAX_VALUES_PER_FACET)
    }

    pub fn sort_facet_values_by(&self, txn: &RoTxn) -> heed::Result<HashMap<String, OrderBy>> {
        let mut orders = self
            .main
            .get::<_, Str, SerdeJson<HashMap<String, OrderBy>>>(
                txn,
                main_key::SORT_FACET_VALUES_BY,
            )?
            .unwrap_or_default();
        // Insert the default ordering if it is not already overwritten by the user.
        orders.entry("*".to_string()).or_insert(OrderBy::Lexicographic);
        Ok(orders)
    }

    pub(crate) fn put_sort_facet_values_by(
        &self,
        txn: &mut RwTxn,
        val: &HashMap<String, OrderBy>,
    ) -> heed::Result<()> {
        self.main.put::<_, Str, SerdeJson<_>>(txn, main_key::SORT_FACET_VALUES_BY, &val)
    }

    pub(crate) fn delete_sort_facet_values_by(&self, txn: &mut RwTxn) -> heed::Result<bool> {
        self.main.delete::<_, Str>(txn, main_key::SORT_FACET_VALUES_BY)
    }

    pub fn pagination_max_total_hits(&self, txn: &RoTxn) -> heed::Result<Option<usize>> {
        self.main.get::<_, Str, OwnedType<usize>>(txn, main_key::PAGINATION_MAX_TOTAL_HITS)
    }
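The new setting is persisted through a `SerdeJson<HashMap<String, OrderBy>>` codec. A rough sketch of what that JSON round-trip looks like with a stand-in enum (assuming serde and serde_json as dependencies; with a plain derive like the one in this diff, unit variants serialize as their names):

```rust
use std::collections::HashMap;

use serde::{Deserialize, Serialize};

// Stand-in for milli's `OrderBy`.
#[derive(Debug, Default, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
enum OrderBy {
    #[default]
    Lexicographic,
    Count,
}

fn main() -> serde_json::Result<()> {
    let mut settings = HashMap::new();
    settings.insert("*".to_string(), OrderBy::Lexicographic);
    settings.insert("genres".to_string(), OrderBy::Count);

    // Roughly what a `SerdeJson<HashMap<String, OrderBy>>` codec would store and read back.
    let json = serde_json::to_string(&settings)?;
    let back: HashMap<String, OrderBy> = serde_json::from_str(&json)?;
    assert_eq!(back, settings);
    Ok(())
}
```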
@ -1466,9 +1498,9 @@ pub(crate) mod tests {
|
||||
|
||||
db_snap!(index, field_distribution,
|
||||
@r###"
|
||||
age 1 |
|
||||
id 2 |
|
||||
name 2 |
|
||||
age 1
|
||||
id 2
|
||||
name 2
|
||||
"###
|
||||
);
|
||||
|
||||
@ -1486,9 +1518,9 @@ pub(crate) mod tests {
|
||||
|
||||
db_snap!(index, field_distribution,
|
||||
@r###"
|
||||
age 1 |
|
||||
id 2 |
|
||||
name 2 |
|
||||
age 1
|
||||
id 2
|
||||
name 2
|
||||
"###
|
||||
);
|
||||
|
||||
@ -1502,9 +1534,9 @@ pub(crate) mod tests {
|
||||
|
||||
db_snap!(index, field_distribution,
|
||||
@r###"
|
||||
has_dog 1 |
|
||||
id 2 |
|
||||
name 2 |
|
||||
has_dog 1
|
||||
id 2
|
||||
name 2
|
||||
"###
|
||||
);
|
||||
}
|
||||
|
@ -5,6 +5,52 @@
|
||||
#[global_allocator]
|
||||
pub static ALLOC: mimalloc::MiMalloc = mimalloc::MiMalloc;
|
||||
|
||||
// #[cfg(test)]
|
||||
// pub mod allocator {
|
||||
// use std::alloc::{GlobalAlloc, System};
|
||||
// use std::sync::atomic::{self, AtomicI64};
|
||||
|
||||
// #[global_allocator]
|
||||
// pub static ALLOC: CountingAlloc = CountingAlloc {
|
||||
// max_resident: AtomicI64::new(0),
|
||||
// resident: AtomicI64::new(0),
|
||||
// allocated: AtomicI64::new(0),
|
||||
// };
|
||||
|
||||
// pub struct CountingAlloc {
|
||||
// pub max_resident: AtomicI64,
|
||||
// pub resident: AtomicI64,
|
||||
// pub allocated: AtomicI64,
|
||||
// }
|
||||
// unsafe impl GlobalAlloc for CountingAlloc {
|
||||
// unsafe fn alloc(&self, layout: std::alloc::Layout) -> *mut u8 {
|
||||
// self.allocated.fetch_add(layout.size() as i64, atomic::Ordering::SeqCst);
|
||||
// let old_resident =
|
||||
// self.resident.fetch_add(layout.size() as i64, atomic::Ordering::SeqCst);
|
||||
|
||||
// let resident = old_resident + layout.size() as i64;
|
||||
// self.max_resident.fetch_max(resident, atomic::Ordering::SeqCst);
|
||||
|
||||
// // if layout.size() > 1_000_000 {
|
||||
// // eprintln!(
|
||||
// // "allocating {} with new resident size: {resident}",
|
||||
// // layout.size() / 1_000_000
|
||||
// // );
|
||||
// // // let trace = std::backtrace::Backtrace::capture();
|
||||
// // // let t = trace.to_string();
|
||||
// // // eprintln!("{t}");
|
||||
// // }
|
||||
|
||||
// System.alloc(layout)
|
||||
// }
|
||||
|
||||
// unsafe fn dealloc(&self, ptr: *mut u8, layout: std::alloc::Layout) {
|
||||
// self.resident.fetch_sub(layout.size() as i64, atomic::Ordering::Relaxed);
|
||||
// System.dealloc(ptr, layout)
|
||||
// }
|
||||
// }
|
||||
// }
|
||||
|
||||
#[macro_use]
|
||||
pub mod documents;
|
||||
|
||||
@ -53,8 +99,8 @@ pub use self::heed_codec::{
|
||||
};
|
||||
pub use self::index::Index;
|
||||
pub use self::search::{
|
||||
FacetDistribution, Filter, FormatOptions, MatchBounds, MatcherBuilder, MatchingWords, Search,
|
||||
SearchResult, TermsMatchingStrategy, DEFAULT_VALUES_PER_FACET,
|
||||
FacetDistribution, Filter, FormatOptions, MatchBounds, MatcherBuilder, MatchingWords, OrderBy,
|
||||
Search, SearchResult, TermsMatchingStrategy, DEFAULT_VALUES_PER_FACET,
|
||||
};
|
||||
|
||||
pub type Result<T> = std::result::Result<T, error::Error>;
|
||||
|
@ -1,19 +1,22 @@
|
||||
use std::collections::{BTreeMap, HashSet};
|
||||
use std::collections::{BTreeMap, HashMap, HashSet};
|
||||
use std::ops::ControlFlow;
|
||||
use std::{fmt, mem};
|
||||
|
||||
use heed::types::ByteSlice;
|
||||
use heed::BytesDecode;
|
||||
use indexmap::IndexMap;
|
||||
use roaring::RoaringBitmap;
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
use crate::error::UserError;
|
||||
use crate::facet::FacetType;
|
||||
use crate::heed_codec::facet::{
|
||||
FacetGroupKeyCodec, FacetGroupValueCodec, FieldDocIdFacetF64Codec, FieldDocIdFacetStringCodec,
|
||||
OrderedF64Codec,
|
||||
FacetGroupKeyCodec, FieldDocIdFacetF64Codec, FieldDocIdFacetStringCodec, OrderedF64Codec,
|
||||
};
|
||||
use crate::heed_codec::{ByteSliceRefCodec, StrRefCodec};
|
||||
use crate::search::facet::facet_distribution_iter;
|
||||
use crate::search::facet::facet_distribution_iter::{
|
||||
count_iterate_over_facet_distribution, lexicographically_iterate_over_facet_distribution,
|
||||
};
|
||||
use crate::{FieldId, Index, Result};
|
||||
|
||||
/// The default number of values by facets that will
@ -24,10 +27,21 @@ pub const DEFAULT_VALUES_PER_FACET: usize = 100;
/// the system to choose between one algorithm or another.
const CANDIDATES_THRESHOLD: u64 = 3000;

/// How should we fetch the facets?
#[derive(Debug, Default, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
pub enum OrderBy {
    /// By lexicographic order...
    #[default]
    Lexicographic,
    /// Or by number of docids in common?
    Count,
}

pub struct FacetDistribution<'a> {
    facets: Option<HashSet<String>>,
    facets: Option<HashMap<String, OrderBy>>,
    candidates: Option<RoaringBitmap>,
    max_values_per_facet: usize,
    default_order_by: OrderBy,
    rtxn: &'a heed::RoTxn<'a>,
    index: &'a Index,
}
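Further down, the distribution code turns an `OrderBy` value into one of two traversal functions with a plain `match`. A toy, self-contained illustration of that dispatch pattern, with simple sort functions standing in for the real lexicographic and count-based iterators:

```rust
#[derive(Debug, Default, Clone, Copy, PartialEq, Eq)]
enum OrderBy {
    #[default]
    Lexicographic,
    Count,
}

// Two stand-in strategies sharing one signature, so a `match` on `OrderBy`
// can select the implementation at runtime.
fn by_lexicographic(values: &mut Vec<(String, u64)>) {
    values.sort_by(|a, b| a.0.cmp(&b.0));
}

fn by_count(values: &mut Vec<(String, u64)>) {
    values.sort_by(|a, b| b.1.cmp(&a.1));
}

fn main() {
    let order_by = OrderBy::Count;
    let sort: fn(&mut Vec<(String, u64)>) = match order_by {
        OrderBy::Lexicographic => by_lexicographic,
        OrderBy::Count => by_count,
    };

    let mut distribution = vec![("blue".to_string(), 2), ("red".to_string(), 7)];
    sort(&mut distribution);
    assert_eq!(distribution[0].0, "red");
}
```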
@ -38,13 +52,22 @@ impl<'a> FacetDistribution<'a> {
            facets: None,
            candidates: None,
            max_values_per_facet: DEFAULT_VALUES_PER_FACET,
            default_order_by: OrderBy::default(),
            rtxn,
            index,
        }
    }

    pub fn facets<I: IntoIterator<Item = A>, A: AsRef<str>>(&mut self, names: I) -> &mut Self {
        self.facets = Some(names.into_iter().map(|s| s.as_ref().to_string()).collect());
    pub fn facets<I: IntoIterator<Item = (A, OrderBy)>, A: AsRef<str>>(
        &mut self,
        names_ordered_by: I,
    ) -> &mut Self {
        self.facets = Some(
            names_ordered_by
                .into_iter()
                .map(|(name, order_by)| (name.as_ref().to_string(), order_by))
                .collect(),
        );
        self
    }

@ -53,6 +76,11 @@ impl<'a> FacetDistribution<'a> {
        self
    }

    pub fn default_order_by(&mut self, order_by: OrderBy) -> &mut Self {
        self.default_order_by = order_by;
        self
    }

    pub fn candidates(&mut self, candidates: RoaringBitmap) -> &mut Self {
        self.candidates = Some(candidates);
        self
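The new `facets` signature accepts any iterator of `(name, OrderBy)` pairs with string-like names, which is what lets the tests below pass `std::iter::once(("colour", OrderBy::default()))`. A small free-standing function with the same bound, just to show how calls against it look:

```rust
use std::collections::HashMap;

#[derive(Debug, Default, Clone, Copy, PartialEq, Eq)]
enum OrderBy {
    #[default]
    Lexicographic,
    Count,
}

// Same shape of bound as the new `facets` method: any iterator of
// (name, OrderBy) pairs, with names given as anything string-like.
fn collect_facets<I, A>(names_ordered_by: I) -> HashMap<String, OrderBy>
where
    I: IntoIterator<Item = (A, OrderBy)>,
    A: AsRef<str>,
{
    names_ordered_by
        .into_iter()
        .map(|(name, order_by)| (name.as_ref().to_string(), order_by))
        .collect()
}

fn main() {
    // Mirrors the call sites in the tests below: a single ("colour", default) pair.
    let facets = collect_facets(std::iter::once(("colour", OrderBy::default())));
    assert_eq!(facets.get("colour"), Some(&OrderBy::Lexicographic));
}
```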
@ -65,7 +93,7 @@ impl<'a> FacetDistribution<'a> {
|
||||
field_id: FieldId,
|
||||
facet_type: FacetType,
|
||||
candidates: &RoaringBitmap,
|
||||
distribution: &mut BTreeMap<String, u64>,
|
||||
distribution: &mut IndexMap<String, u64>,
|
||||
) -> heed::Result<()> {
|
||||
match facet_type {
|
||||
FacetType::Number => {
|
||||
@ -134,9 +162,15 @@ impl<'a> FacetDistribution<'a> {
|
||||
&self,
|
||||
field_id: FieldId,
|
||||
candidates: &RoaringBitmap,
|
||||
distribution: &mut BTreeMap<String, u64>,
|
||||
order_by: OrderBy,
|
||||
distribution: &mut IndexMap<String, u64>,
|
||||
) -> heed::Result<()> {
|
||||
facet_distribution_iter::iterate_over_facet_distribution(
|
||||
let search_function = match order_by {
|
||||
OrderBy::Lexicographic => lexicographically_iterate_over_facet_distribution,
|
||||
OrderBy::Count => count_iterate_over_facet_distribution,
|
||||
};
|
||||
|
||||
search_function(
|
||||
self.rtxn,
|
||||
self.index
|
||||
.facet_id_f64_docids
|
||||
@ -159,9 +193,15 @@ impl<'a> FacetDistribution<'a> {
|
||||
&self,
|
||||
field_id: FieldId,
|
||||
candidates: &RoaringBitmap,
|
||||
distribution: &mut BTreeMap<String, u64>,
|
||||
order_by: OrderBy,
|
||||
distribution: &mut IndexMap<String, u64>,
|
||||
) -> heed::Result<()> {
|
||||
facet_distribution_iter::iterate_over_facet_distribution(
|
||||
let search_function = match order_by {
|
||||
OrderBy::Lexicographic => lexicographically_iterate_over_facet_distribution,
|
||||
OrderBy::Count => count_iterate_over_facet_distribution,
|
||||
};
|
||||
|
||||
search_function(
|
||||
self.rtxn,
|
||||
self.index
|
||||
.facet_id_string_docids
|
||||
@ -189,93 +229,48 @@ impl<'a> FacetDistribution<'a> {
|
||||
)
|
||||
}
|
||||
|
||||
/// Placeholder search, a.k.a. no candidates were specified. We iterate through the
|
||||
/// facet values one by one and iterate on the facet level 0 for numbers.
|
||||
fn facet_values_from_raw_facet_database(
|
||||
fn facet_values(
|
||||
&self,
|
||||
field_id: FieldId,
|
||||
) -> heed::Result<BTreeMap<String, u64>> {
|
||||
let mut distribution = BTreeMap::new();
|
||||
|
||||
let db = self.index.facet_id_f64_docids;
|
||||
let mut prefix = vec![];
|
||||
prefix.extend_from_slice(&field_id.to_be_bytes());
|
||||
prefix.push(0); // read values from level 0 only
|
||||
|
||||
let iter = db
|
||||
.as_polymorph()
|
||||
.prefix_iter::<_, ByteSlice, ByteSlice>(self.rtxn, prefix.as_slice())?
|
||||
.remap_types::<FacetGroupKeyCodec<OrderedF64Codec>, FacetGroupValueCodec>();
|
||||
|
||||
for result in iter {
|
||||
let (key, value) = result?;
|
||||
distribution.insert(key.left_bound.to_string(), value.bitmap.len());
|
||||
if distribution.len() == self.max_values_per_facet {
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
let iter = self
|
||||
.index
|
||||
.facet_id_string_docids
|
||||
.as_polymorph()
|
||||
.prefix_iter::<_, ByteSlice, ByteSlice>(self.rtxn, prefix.as_slice())?
|
||||
.remap_types::<FacetGroupKeyCodec<StrRefCodec>, FacetGroupValueCodec>();
|
||||
|
||||
for result in iter {
|
||||
let (key, value) = result?;
|
||||
|
||||
let docid = value.bitmap.iter().next().unwrap();
|
||||
let key: (FieldId, _, &'a str) = (field_id, docid, key.left_bound);
|
||||
let original_string =
|
||||
self.index.field_id_docid_facet_strings.get(self.rtxn, &key)?.unwrap().to_owned();
|
||||
|
||||
distribution.insert(original_string, value.bitmap.len());
|
||||
if distribution.len() == self.max_values_per_facet {
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
Ok(distribution)
|
||||
}
|
||||
|
||||
fn facet_values(&self, field_id: FieldId) -> heed::Result<BTreeMap<String, u64>> {
|
||||
order_by: OrderBy,
|
||||
) -> heed::Result<IndexMap<String, u64>> {
|
||||
use FacetType::{Number, String};
|
||||
|
||||
match self.candidates {
|
||||
Some(ref candidates) => {
|
||||
let mut distribution = IndexMap::new();
|
||||
match (order_by, &self.candidates) {
|
||||
(OrderBy::Lexicographic, Some(cnd)) if cnd.len() <= CANDIDATES_THRESHOLD => {
|
||||
// Classic search, candidates were specified, we must return facet values only related
|
||||
// to those candidates. We also enter here for facet strings for performance reasons.
|
||||
let mut distribution = BTreeMap::new();
|
||||
if candidates.len() <= CANDIDATES_THRESHOLD {
|
||||
self.facet_distribution_from_documents(
|
||||
field_id,
|
||||
Number,
|
||||
candidates,
|
||||
&mut distribution,
|
||||
)?;
|
||||
self.facet_distribution_from_documents(
|
||||
field_id,
|
||||
String,
|
||||
candidates,
|
||||
&mut distribution,
|
||||
)?;
|
||||
} else {
|
||||
self.facet_numbers_distribution_from_facet_levels(
|
||||
field_id,
|
||||
candidates,
|
||||
&mut distribution,
|
||||
)?;
|
||||
self.facet_strings_distribution_from_facet_levels(
|
||||
field_id,
|
||||
candidates,
|
||||
&mut distribution,
|
||||
)?;
|
||||
}
|
||||
Ok(distribution)
|
||||
self.facet_distribution_from_documents(field_id, Number, cnd, &mut distribution)?;
|
||||
self.facet_distribution_from_documents(field_id, String, cnd, &mut distribution)?;
|
||||
}
|
||||
None => self.facet_values_from_raw_facet_database(field_id),
|
||||
}
|
||||
_ => {
|
||||
let universe;
|
||||
let candidates;
|
||||
match &self.candidates {
|
||||
Some(cnd) => candidates = cnd,
|
||||
None => {
|
||||
universe = self.index.documents_ids(self.rtxn)?;
|
||||
candidates = &universe;
|
||||
}
|
||||
}
|
||||
|
||||
self.facet_numbers_distribution_from_facet_levels(
|
||||
field_id,
|
||||
candidates,
|
||||
order_by,
|
||||
&mut distribution,
|
||||
)?;
|
||||
self.facet_strings_distribution_from_facet_levels(
|
||||
field_id,
|
||||
candidates,
|
||||
order_by,
|
||||
&mut distribution,
|
||||
)?;
|
||||
}
|
||||
};
|
||||
|
||||
Ok(distribution)
|
||||
}
|
||||
|
||||
pub fn compute_stats(&self) -> Result<BTreeMap<String, (f64, f64)>> {
|
||||
@ -291,6 +286,7 @@ impl<'a> FacetDistribution<'a> {
|
||||
Some(facets) => {
|
||||
let invalid_fields: HashSet<_> = facets
|
||||
.iter()
|
||||
.map(|(name, _)| name)
|
||||
.filter(|facet| !crate::is_faceted(facet, &filterable_fields))
|
||||
.collect();
|
||||
if !invalid_fields.is_empty() {
|
||||
@ -300,7 +296,7 @@ impl<'a> FacetDistribution<'a> {
|
||||
}
|
||||
.into());
|
||||
} else {
|
||||
facets.clone()
|
||||
facets.into_iter().map(|(name, _)| name).cloned().collect()
|
||||
}
|
||||
}
|
||||
None => filterable_fields,
|
||||
@ -337,7 +333,7 @@ impl<'a> FacetDistribution<'a> {
|
||||
Ok(distribution)
|
||||
}
|
||||
|
||||
pub fn execute(&self) -> Result<BTreeMap<String, BTreeMap<String, u64>>> {
|
||||
pub fn execute(&self) -> Result<BTreeMap<String, IndexMap<String, u64>>> {
|
||||
let fields_ids_map = self.index.fields_ids_map(self.rtxn)?;
|
||||
let filterable_fields = self.index.filterable_fields(self.rtxn)?;
|
||||
|
||||
@ -345,6 +341,7 @@ impl<'a> FacetDistribution<'a> {
|
||||
Some(ref facets) => {
|
||||
let invalid_fields: HashSet<_> = facets
|
||||
.iter()
|
||||
.map(|(name, _)| name)
|
||||
.filter(|facet| !crate::is_faceted(facet, &filterable_fields))
|
||||
.collect();
|
||||
if !invalid_fields.is_empty() {
|
||||
@ -354,7 +351,7 @@ impl<'a> FacetDistribution<'a> {
|
||||
}
|
||||
.into());
|
||||
} else {
|
||||
facets.clone()
|
||||
facets.into_iter().map(|(name, _)| name).cloned().collect()
|
||||
}
|
||||
}
|
||||
None => filterable_fields,
|
||||
@ -363,7 +360,13 @@ impl<'a> FacetDistribution<'a> {
|
||||
let mut distribution = BTreeMap::new();
|
||||
for (fid, name) in fields_ids_map.iter() {
|
||||
if crate::is_faceted(name, &fields) {
|
||||
let values = self.facet_values(fid)?;
|
||||
let order_by = self
|
||||
.facets
|
||||
.as_ref()
|
||||
.map(|facets| facets.get(name).copied())
|
||||
.flatten()
|
||||
.unwrap_or(self.default_order_by);
|
||||
let values = self.facet_values(fid, order_by)?;
|
||||
distribution.insert(name.to_string(), values);
|
||||
}
|
||||
}
|
||||
@ -374,13 +377,20 @@ impl<'a> FacetDistribution<'a> {
|
||||
|
||||
impl fmt::Debug for FacetDistribution<'_> {
|
||||
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
||||
let FacetDistribution { facets, candidates, max_values_per_facet, rtxn: _, index: _ } =
|
||||
self;
|
||||
let FacetDistribution {
|
||||
facets,
|
||||
candidates,
|
||||
max_values_per_facet,
|
||||
default_order_by,
|
||||
rtxn: _,
|
||||
index: _,
|
||||
} = self;
|
||||
|
||||
f.debug_struct("FacetDistribution")
|
||||
.field("facets", facets)
|
||||
.field("candidates", candidates)
|
||||
.field("max_values_per_facet", max_values_per_facet)
|
||||
.field("default_order_by", default_order_by)
|
||||
.finish()
|
||||
}
|
||||
}
|
||||
@ -392,7 +402,7 @@ mod tests {
|
||||
|
||||
use crate::documents::documents_batch_reader_from_objects;
|
||||
use crate::index::tests::TempIndex;
|
||||
use crate::{milli_snap, FacetDistribution};
|
||||
use crate::{milli_snap, FacetDistribution, OrderBy};
|
||||
|
||||
#[test]
|
||||
fn few_candidates_few_facet_values() {
|
||||
@ -417,14 +427,14 @@ mod tests {
|
||||
let txn = index.read_txn().unwrap();
|
||||
|
||||
let map = FacetDistribution::new(&txn, &index)
|
||||
.facets(std::iter::once("colour"))
|
||||
.facets(std::iter::once(("colour", OrderBy::default())))
|
||||
.execute()
|
||||
.unwrap();
|
||||
|
||||
milli_snap!(format!("{map:?}"), @r###"{"colour": {"Blue": 2, "RED": 1}}"###);
|
||||
|
||||
let map = FacetDistribution::new(&txn, &index)
|
||||
.facets(std::iter::once("colour"))
|
||||
.facets(std::iter::once(("colour", OrderBy::default())))
|
||||
.candidates([0, 1, 2].iter().copied().collect())
|
||||
.execute()
|
||||
.unwrap();
|
||||
@ -432,7 +442,7 @@ mod tests {
|
||||
milli_snap!(format!("{map:?}"), @r###"{"colour": {"Blue": 2, "RED": 1}}"###);
|
||||
|
||||
let map = FacetDistribution::new(&txn, &index)
|
||||
.facets(std::iter::once("colour"))
|
||||
.facets(std::iter::once(("colour", OrderBy::default())))
|
||||
.candidates([1, 2].iter().copied().collect())
|
||||
.execute()
|
||||
.unwrap();
|
||||
@ -443,7 +453,7 @@ mod tests {
|
||||
milli_snap!(format!("{map:?}"), @r###"{"colour": {" blue": 1, "RED": 1}}"###);
|
||||
|
||||
let map = FacetDistribution::new(&txn, &index)
|
||||
.facets(std::iter::once("colour"))
|
||||
.facets(std::iter::once(("colour", OrderBy::default())))
|
||||
.candidates([2].iter().copied().collect())
|
||||
.execute()
|
||||
.unwrap();
|
||||
@ -451,7 +461,7 @@ mod tests {
|
||||
milli_snap!(format!("{map:?}"), @r###"{"colour": {"RED": 1}}"###);
|
||||
|
||||
let map = FacetDistribution::new(&txn, &index)
|
||||
.facets(std::iter::once("colour"))
|
||||
.facets(std::iter::once(("colour", OrderBy::default())))
|
||||
.candidates([0, 1, 2].iter().copied().collect())
|
||||
.max_values_per_facet(1)
|
||||
.execute()
|
||||
@ -489,14 +499,14 @@ mod tests {
|
||||
let txn = index.read_txn().unwrap();
|
||||
|
||||
let map = FacetDistribution::new(&txn, &index)
|
||||
.facets(std::iter::once("colour"))
|
||||
.facets(std::iter::once(("colour", OrderBy::default())))
|
||||
.execute()
|
||||
.unwrap();
|
||||
|
||||
milli_snap!(format!("{map:?}"), @r###"{"colour": {"Blue": 4000, "Red": 6000}}"###);
|
||||
|
||||
let map = FacetDistribution::new(&txn, &index)
|
||||
.facets(std::iter::once("colour"))
|
||||
.facets(std::iter::once(("colour", OrderBy::default())))
|
||||
.max_values_per_facet(1)
|
||||
.execute()
|
||||
.unwrap();
|
||||
@ -504,7 +514,7 @@ mod tests {
|
||||
milli_snap!(format!("{map:?}"), @r###"{"colour": {"Blue": 4000}}"###);
|
||||
|
||||
let map = FacetDistribution::new(&txn, &index)
|
||||
.facets(std::iter::once("colour"))
|
||||
.facets(std::iter::once(("colour", OrderBy::default())))
|
||||
.candidates((0..10_000).collect())
|
||||
.execute()
|
||||
.unwrap();
|
||||
@ -512,7 +522,7 @@ mod tests {
|
||||
milli_snap!(format!("{map:?}"), @r###"{"colour": {"Blue": 4000, "Red": 6000}}"###);
|
||||
|
||||
let map = FacetDistribution::new(&txn, &index)
|
||||
.facets(std::iter::once("colour"))
|
||||
.facets(std::iter::once(("colour", OrderBy::default())))
|
||||
.candidates((0..5_000).collect())
|
||||
.execute()
|
||||
.unwrap();
|
||||
@ -520,7 +530,7 @@ mod tests {
|
||||
milli_snap!(format!("{map:?}"), @r###"{"colour": {"Blue": 2000, "Red": 3000}}"###);
|
||||
|
||||
let map = FacetDistribution::new(&txn, &index)
|
||||
.facets(std::iter::once("colour"))
|
||||
.facets(std::iter::once(("colour", OrderBy::default())))
|
||||
.candidates((0..5_000).collect())
|
||||
.execute()
|
||||
.unwrap();
|
||||
@ -528,7 +538,7 @@ mod tests {
|
||||
milli_snap!(format!("{map:?}"), @r###"{"colour": {"Blue": 2000, "Red": 3000}}"###);
|
||||
|
||||
let map = FacetDistribution::new(&txn, &index)
|
||||
.facets(std::iter::once("colour"))
|
||||
.facets(std::iter::once(("colour", OrderBy::default())))
|
||||
.candidates((0..5_000).collect())
|
||||
.max_values_per_facet(1)
|
||||
.execute()
|
||||
@ -566,14 +576,14 @@ mod tests {
|
||||
let txn = index.read_txn().unwrap();
|
||||
|
||||
let map = FacetDistribution::new(&txn, &index)
|
||||
.facets(std::iter::once("colour"))
|
||||
.facets(std::iter::once(("colour", OrderBy::default())))
|
||||
.execute()
|
||||
.unwrap();
|
||||
|
||||
milli_snap!(format!("{map:?}"), "no_candidates", @"ac9229ed5964d893af96a7076e2f8af5");
|
||||
|
||||
let map = FacetDistribution::new(&txn, &index)
|
||||
.facets(std::iter::once("colour"))
|
||||
.facets(std::iter::once(("colour", OrderBy::default())))
|
||||
.max_values_per_facet(2)
|
||||
.execute()
|
||||
.unwrap();
|
||||
@ -581,7 +591,7 @@ mod tests {
|
||||
milli_snap!(format!("{map:?}"), "no_candidates_with_max_2", @r###"{"colour": {"0": 10, "1": 10}}"###);
|
||||
|
||||
let map = FacetDistribution::new(&txn, &index)
|
||||
.facets(std::iter::once("colour"))
|
||||
.facets(std::iter::once(("colour", OrderBy::default())))
|
||||
.candidates((0..10_000).collect())
|
||||
.execute()
|
||||
.unwrap();
|
||||
@ -589,7 +599,7 @@ mod tests {
|
||||
milli_snap!(format!("{map:?}"), "candidates_0_10_000", @"ac9229ed5964d893af96a7076e2f8af5");
|
||||
|
||||
let map = FacetDistribution::new(&txn, &index)
|
||||
.facets(std::iter::once("colour"))
|
||||
.facets(std::iter::once(("colour", OrderBy::default())))
|
||||
.candidates((0..5_000).collect())
|
||||
.execute()
|
||||
.unwrap();
|
||||
@ -626,14 +636,14 @@ mod tests {
|
||||
let txn = index.read_txn().unwrap();
|
||||
|
||||
let map = FacetDistribution::new(&txn, &index)
|
||||
.facets(std::iter::once("colour"))
|
||||
.facets(std::iter::once(("colour", OrderBy::default())))
|
||||
.compute_stats()
|
||||
.unwrap();
|
||||
|
||||
milli_snap!(format!("{map:?}"), "no_candidates", @"{}");
|
||||
|
||||
let map = FacetDistribution::new(&txn, &index)
|
||||
.facets(std::iter::once("colour"))
|
||||
.facets(std::iter::once(("colour", OrderBy::default())))
|
||||
.candidates((0..1000).collect())
|
||||
.compute_stats()
|
||||
.unwrap();
|
||||
@ -641,7 +651,7 @@ mod tests {
|
||||
milli_snap!(format!("{map:?}"), "candidates_0_1000", @r###"{"colour": (0.0, 999.0)}"###);
|
||||
|
||||
let map = FacetDistribution::new(&txn, &index)
|
||||
.facets(std::iter::once("colour"))
|
||||
.facets(std::iter::once(("colour", OrderBy::default())))
|
||||
.candidates((217..777).collect())
|
||||
.compute_stats()
|
||||
.unwrap();
|
||||
@ -678,14 +688,14 @@ mod tests {
|
||||
let txn = index.read_txn().unwrap();
|
||||
|
||||
let map = FacetDistribution::new(&txn, &index)
|
||||
.facets(std::iter::once("colour"))
|
||||
.facets(std::iter::once(("colour", OrderBy::default())))
|
||||
.compute_stats()
|
||||
.unwrap();
|
||||
|
||||
milli_snap!(format!("{map:?}"), "no_candidates", @"{}");
|
||||
|
||||
let map = FacetDistribution::new(&txn, &index)
|
||||
.facets(std::iter::once("colour"))
|
||||
.facets(std::iter::once(("colour", OrderBy::default())))
|
||||
.candidates((0..1000).collect())
|
||||
.compute_stats()
|
||||
.unwrap();
|
||||
@ -693,7 +703,7 @@ mod tests {
|
||||
milli_snap!(format!("{map:?}"), "candidates_0_1000", @r###"{"colour": (0.0, 1999.0)}"###);
|
||||
|
||||
let map = FacetDistribution::new(&txn, &index)
|
||||
.facets(std::iter::once("colour"))
|
||||
.facets(std::iter::once(("colour", OrderBy::default())))
|
||||
.candidates((217..777).collect())
|
||||
.compute_stats()
|
||||
.unwrap();
|
||||
@ -730,14 +740,14 @@ mod tests {
|
||||
let txn = index.read_txn().unwrap();
|
||||
|
||||
let map = FacetDistribution::new(&txn, &index)
|
||||
.facets(std::iter::once("colour"))
|
||||
.facets(std::iter::once(("colour", OrderBy::default())))
|
||||
.compute_stats()
|
||||
.unwrap();
|
||||
|
||||
milli_snap!(format!("{map:?}"), "no_candidates", @"{}");
|
||||
|
||||
let map = FacetDistribution::new(&txn, &index)
|
||||
.facets(std::iter::once("colour"))
|
||||
.facets(std::iter::once(("colour", OrderBy::default())))
|
||||
.candidates((0..1000).collect())
|
||||
.compute_stats()
|
||||
.unwrap();
|
||||
@ -745,7 +755,7 @@ mod tests {
|
||||
milli_snap!(format!("{map:?}"), "candidates_0_1000", @r###"{"colour": (0.0, 999.0)}"###);
|
||||
|
||||
let map = FacetDistribution::new(&txn, &index)
|
||||
.facets(std::iter::once("colour"))
|
||||
.facets(std::iter::once(("colour", OrderBy::default())))
|
||||
.candidates((217..777).collect())
|
||||
.compute_stats()
|
||||
.unwrap();
|
||||
@ -786,14 +796,14 @@ mod tests {
|
||||
let txn = index.read_txn().unwrap();
|
||||
|
||||
let map = FacetDistribution::new(&txn, &index)
|
||||
.facets(std::iter::once("colour"))
|
||||
.facets(std::iter::once(("colour", OrderBy::default())))
|
||||
.compute_stats()
|
||||
.unwrap();
|
||||
|
||||
milli_snap!(format!("{map:?}"), "no_candidates", @"{}");
|
||||
|
||||
let map = FacetDistribution::new(&txn, &index)
|
||||
.facets(std::iter::once("colour"))
|
||||
.facets(std::iter::once(("colour", OrderBy::default())))
|
||||
.candidates((0..1000).collect())
|
||||
.compute_stats()
|
||||
.unwrap();
|
||||
@ -801,7 +811,7 @@ mod tests {
|
||||
milli_snap!(format!("{map:?}"), "candidates_0_1000", @r###"{"colour": (0.0, 1998.0)}"###);
|
||||
|
||||
let map = FacetDistribution::new(&txn, &index)
|
||||
.facets(std::iter::once("colour"))
|
||||
.facets(std::iter::once(("colour", OrderBy::default())))
|
||||
.candidates((217..777).collect())
|
||||
.compute_stats()
|
||||
.unwrap();
|
||||
|
@ -1,3 +1,5 @@
|
||||
use std::cmp::Reverse;
|
||||
use std::collections::BinaryHeap;
|
||||
use std::ops::ControlFlow;
|
||||
|
||||
use heed::Result;
|
||||
@ -19,7 +21,7 @@ use crate::DocumentId;
|
||||
///
|
||||
/// The return value of the closure is a `ControlFlow<()>` which indicates whether we should
|
||||
/// keep iterating over the different facet values or stop.
|
||||
pub fn iterate_over_facet_distribution<'t, CB>(
|
||||
pub fn lexicographically_iterate_over_facet_distribution<'t, CB>(
|
||||
rtxn: &'t heed::RoTxn<'t>,
|
||||
db: heed::Database<FacetGroupKeyCodec<ByteSliceRefCodec>, FacetGroupValueCodec>,
|
||||
field_id: u16,
|
||||
@ -29,7 +31,7 @@ pub fn iterate_over_facet_distribution<'t, CB>(
|
||||
where
|
||||
CB: FnMut(&'t [u8], u64, DocumentId) -> Result<ControlFlow<()>>,
|
||||
{
|
||||
let mut fd = FacetDistribution { rtxn, db, field_id, callback };
|
||||
let mut fd = LexicographicFacetDistribution { rtxn, db, field_id, callback };
|
||||
let highest_level = get_highest_level(
|
||||
rtxn,
|
||||
db.remap_key_type::<FacetGroupKeyCodec<ByteSliceRefCodec>>(),
|
||||
@ -44,7 +46,99 @@ where
|
||||
}
|
||||
}
|
||||
|
||||
struct FacetDistribution<'t, CB>
pub fn count_iterate_over_facet_distribution<'t, CB>(
    rtxn: &'t heed::RoTxn<'t>,
    db: heed::Database<FacetGroupKeyCodec<ByteSliceRefCodec>, FacetGroupValueCodec>,
    field_id: u16,
    candidates: &RoaringBitmap,
    mut callback: CB,
) -> Result<()>
where
    CB: FnMut(&'t [u8], u64, DocumentId) -> Result<ControlFlow<()>>,
{
    #[derive(Debug, PartialOrd, Ord, PartialEq, Eq)]
    struct LevelEntry<'t> {
        /// The number of candidates in this entry.
        count: u64,
        /// The key level of the entry.
        level: Reverse<u8>,
        /// The left bound key.
        left_bound: &'t [u8],
        /// The number of keys we must look for after `left_bound`.
        group_size: u8,
        /// Any docid in the set of matching documents. Used to find the original facet string.
        any_docid: u32,
    }

    // Represents the list of keys that we must explore.
    let mut heap = BinaryHeap::new();
|
||||
let highest_level = get_highest_level(
|
||||
rtxn,
|
||||
db.remap_key_type::<FacetGroupKeyCodec<ByteSliceRefCodec>>(),
|
||||
field_id,
|
||||
)?;
|
||||
|
||||
if let Some(first_bound) = get_first_facet_value::<ByteSliceRefCodec>(rtxn, db, field_id)? {
|
||||
// We first fill the heap with values from the highest level
|
||||
let starting_key =
|
||||
FacetGroupKey { field_id, level: highest_level, left_bound: first_bound };
|
||||
for el in db.range(rtxn, &(&starting_key..)).unwrap().take(usize::MAX) {
|
||||
let (key, value) = el.unwrap();
|
||||
// The range is unbounded on the right and the group size for the highest level is MAX,
|
||||
// so we need to check that we are not iterating over the next field id
|
||||
if key.field_id != field_id {
|
||||
break;
|
||||
}
|
||||
let intersection = value.bitmap & candidates;
|
||||
let count = intersection.len();
|
||||
if count != 0 {
|
||||
heap.push(LevelEntry {
|
||||
count,
|
||||
level: Reverse(key.level),
|
||||
left_bound: key.left_bound,
|
||||
group_size: value.size,
|
||||
any_docid: intersection.min().unwrap(),
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
while let Some(LevelEntry { count, level, left_bound, group_size, any_docid }) = heap.pop()
|
||||
{
|
||||
if let Reverse(0) = level {
|
||||
match (callback)(left_bound, count, any_docid)? {
|
||||
ControlFlow::Continue(_) => (),
|
||||
ControlFlow::Break(_) => return Ok(()),
|
||||
}
|
||||
} else {
|
||||
let starting_key = FacetGroupKey { field_id, level: level.0 - 1, left_bound };
|
||||
for el in db.range(rtxn, &(&starting_key..)).unwrap().take(group_size as usize) {
|
||||
let (key, value) = el.unwrap();
|
||||
// The range is unbounded on the right and the group size for the highest level is MAX,
|
||||
// so we need to check that we are not iterating over the next field id
|
||||
if key.field_id != field_id {
|
||||
break;
|
||||
}
|
||||
let intersection = value.bitmap & candidates;
|
||||
let count = intersection.len();
|
||||
if count != 0 {
|
||||
heap.push(LevelEntry {
|
||||
count,
|
||||
level: Reverse(key.level),
|
||||
left_bound: key.left_bound,
|
||||
group_size: value.size,
|
||||
any_docid: intersection.min().unwrap(),
|
||||
});
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
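The traversal above relies entirely on the derived ordering of `LevelEntry`: the field order makes the max-heap pop the entry with the largest count first and, on ties, the finest (lowest) level thanks to `Reverse`. A stand-alone toy demonstrating why that field order gives the intended pop order:

```rust
use std::cmp::Reverse;
use std::collections::BinaryHeap;

// Field order matters for the derived `Ord`: entries compare by `count` first,
// then by `Reverse(level)`, so the max-heap pops the biggest count and,
// on equal counts, the lowest level first.
#[derive(Debug, PartialOrd, Ord, PartialEq, Eq)]
struct LevelEntry {
    count: u64,
    level: Reverse<u8>,
    left_bound: &'static str,
}

fn main() {
    let mut heap = BinaryHeap::new();
    heap.push(LevelEntry { count: 12, level: Reverse(2), left_bound: "a" });
    heap.push(LevelEntry { count: 30, level: Reverse(0), left_bound: "m" });
    heap.push(LevelEntry { count: 30, level: Reverse(1), left_bound: "g" });

    let order: Vec<_> = std::iter::from_fn(|| heap.pop())
        .map(|e| (e.count, e.level.0, e.left_bound))
        .collect();
    // Highest count first; for equal counts the lower level (level 0) comes first.
    assert_eq!(order, vec![(30, 0, "m"), (30, 1, "g"), (12, 2, "a")]);
}
```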
|
||||
|
||||
/// Iterate over the facets values by lexicographic order.
|
||||
struct LexicographicFacetDistribution<'t, CB>
|
||||
where
|
||||
CB: FnMut(&'t [u8], u64, DocumentId) -> Result<ControlFlow<()>>,
|
||||
{
|
||||
@ -54,7 +148,7 @@ where
|
||||
callback: CB,
|
||||
}
|
||||
|
||||
impl<'t, CB> FacetDistribution<'t, CB>
|
||||
impl<'t, CB> LexicographicFacetDistribution<'t, CB>
|
||||
where
|
||||
CB: FnMut(&'t [u8], u64, DocumentId) -> Result<ControlFlow<()>>,
|
||||
{
|
||||
@ -86,6 +180,7 @@ where
|
||||
}
|
||||
Ok(ControlFlow::Continue(()))
|
||||
}
|
||||
|
||||
fn iterate(
|
||||
&mut self,
|
||||
candidates: &RoaringBitmap,
|
||||
@ -116,7 +211,7 @@ where
|
||||
value.size as usize,
|
||||
)?;
|
||||
match cf {
|
||||
ControlFlow::Continue(_) => {}
|
||||
ControlFlow::Continue(_) => (),
|
||||
ControlFlow::Break(_) => return Ok(ControlFlow::Break(())),
|
||||
}
|
||||
}
|
||||
@ -132,7 +227,7 @@ mod tests {
|
||||
use heed::BytesDecode;
|
||||
use roaring::RoaringBitmap;
|
||||
|
||||
use super::iterate_over_facet_distribution;
|
||||
use super::lexicographically_iterate_over_facet_distribution;
|
||||
use crate::heed_codec::facet::OrderedF64Codec;
|
||||
use crate::milli_snap;
|
||||
use crate::search::facet::tests::{get_random_looking_index, get_simple_index};
|
||||
@ -144,7 +239,7 @@ mod tests {
|
||||
let txn = index.env.read_txn().unwrap();
|
||||
let candidates = (0..=255).collect::<RoaringBitmap>();
|
||||
let mut results = String::new();
|
||||
iterate_over_facet_distribution(
|
||||
lexicographically_iterate_over_facet_distribution(
|
||||
&txn,
|
||||
index.content,
|
||||
0,
|
||||
@ -161,6 +256,7 @@ mod tests {
|
||||
txn.commit().unwrap();
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn filter_distribution_all_stop_early() {
|
||||
let indexes = [get_simple_index(), get_random_looking_index()];
|
||||
@ -169,7 +265,7 @@ mod tests {
|
||||
let candidates = (0..=255).collect::<RoaringBitmap>();
|
||||
let mut results = String::new();
|
||||
let mut nbr_facets = 0;
|
||||
iterate_over_facet_distribution(
|
||||
lexicographically_iterate_over_facet_distribution(
|
||||
&txn,
|
||||
index.content,
|
||||
0,
|
||||
|
@ -4,7 +4,7 @@ use heed::types::{ByteSlice, DecodeIgnore};
|
||||
use heed::{BytesDecode, RoTxn};
|
||||
use roaring::RoaringBitmap;
|
||||
|
||||
pub use self::facet_distribution::{FacetDistribution, DEFAULT_VALUES_PER_FACET};
|
||||
pub use self::facet_distribution::{FacetDistribution, OrderBy, DEFAULT_VALUES_PER_FACET};
|
||||
pub use self::filter::{BadGeoError, Filter};
|
||||
use crate::heed_codec::facet::{FacetGroupKeyCodec, FacetGroupValueCodec, OrderedF64Codec};
|
||||
use crate::heed_codec::ByteSliceRefCodec;
|
||||
|
@ -4,7 +4,7 @@ use levenshtein_automata::{LevenshteinAutomatonBuilder as LevBuilder, DFA};
|
||||
use once_cell::sync::Lazy;
|
||||
use roaring::bitmap::RoaringBitmap;
|
||||
|
||||
pub use self::facet::{FacetDistribution, Filter, DEFAULT_VALUES_PER_FACET};
|
||||
pub use self::facet::{FacetDistribution, Filter, OrderBy, DEFAULT_VALUES_PER_FACET};
|
||||
pub use self::new::matches::{FormatOptions, MatchBounds, Matcher, MatcherBuilder, MatchingWords};
|
||||
use self::new::PartialSearchResult;
|
||||
use crate::{
|
||||
|
@ -26,6 +26,7 @@ pub fn apply_distinct_rule(
|
||||
ctx: &mut SearchContext,
|
||||
field_id: u16,
|
||||
candidates: &RoaringBitmap,
|
||||
// TODO: add a universe here, such that the `excluded` are a subset of the universe?
|
||||
) -> Result<DistinctOutput> {
|
||||
let mut excluded = RoaringBitmap::new();
|
||||
let mut remaining = RoaringBitmap::new();
|
||||
|
@ -206,7 +206,7 @@ impl State {
|
||||
)?;
|
||||
intersection &= &candidates;
|
||||
if !intersection.is_empty() {
|
||||
// Although not really worth it in terms of performance,
|
||||
// TODO: although not really worth it in terms of performance,
|
||||
// it would be good to put this in cache for the sake of consistency
|
||||
let candidates_with_exact_word_count = if count_all_positions < u8::MAX as usize {
|
||||
ctx.index
|
||||
|
@ -32,7 +32,7 @@ impl<T> Interned<T> {
|
||||
#[derive(Clone)]
|
||||
pub struct DedupInterner<T> {
|
||||
stable_store: Vec<T>,
|
||||
lookup: FxHashMap<T, Interned<T>>,
|
||||
lookup: FxHashMap<T, Interned<T>>, // TODO: Arc
|
||||
}
|
||||
impl<T> Default for DedupInterner<T> {
|
||||
fn default() -> Self {
|
||||
|
@ -1,4 +1,5 @@
|
||||
/// Maximum number of tokens we consider in a single search.
|
||||
// TODO: Loic, find proper value here so we don't overflow the interner.
|
||||
pub const MAX_TOKEN_COUNT: usize = 1_000;
|
||||
|
||||
/// Maximum number of prefixes that can be derived from a single word.
|
||||
|
@ -92,7 +92,7 @@ impl QueryGraph {
|
||||
/// which contains ngrams.
|
||||
pub fn from_query(
|
||||
ctx: &mut SearchContext,
|
||||
// The terms here must be consecutive
|
||||
// NOTE: the terms here must be consecutive
|
||||
terms: &[LocatedQueryTerm],
|
||||
) -> Result<(QueryGraph, Vec<LocatedQueryTerm>)> {
|
||||
let mut new_located_query_terms = terms.to_vec();
|
||||
@ -103,7 +103,7 @@ impl QueryGraph {
|
||||
let root_node = 0;
|
||||
let end_node = 1;
|
||||
|
||||
// We could consider generalizing to 4,5,6,7,etc. ngrams
|
||||
// TODO: we could consider generalizing to 4,5,6,7,etc. ngrams
|
||||
let (mut prev2, mut prev1, mut prev0): (Vec<u16>, Vec<u16>, Vec<u16>) =
|
||||
(vec![], vec![], vec![root_node]);
|
||||
|
||||
|
@ -132,6 +132,7 @@ impl QueryTermSubset {
|
||||
if full_query_term.ngram_words.is_some() {
|
||||
return None;
|
||||
}
|
||||
// TODO: included in subset
|
||||
if let Some(phrase) = full_query_term.zero_typo.phrase {
|
||||
self.zero_typo_subset.contains_phrase(phrase).then_some(ExactTerm::Phrase(phrase))
|
||||
} else if let Some(word) = full_query_term.zero_typo.exact {
|
||||
@ -181,6 +182,7 @@ impl QueryTermSubset {
|
||||
let word = match &self.zero_typo_subset {
|
||||
NTypoTermSubset::All => Some(use_prefix_db),
|
||||
NTypoTermSubset::Subset { words, phrases: _ } => {
|
||||
// TODO: use a subset of prefix words instead
|
||||
if words.contains(&use_prefix_db) {
|
||||
Some(use_prefix_db)
|
||||
} else {
|
||||
@ -202,6 +204,7 @@ impl QueryTermSubset {
|
||||
ctx: &mut SearchContext,
|
||||
) -> Result<BTreeSet<Word>> {
|
||||
let mut result = BTreeSet::default();
|
||||
// TODO: a compute_partially function
|
||||
if !self.one_typo_subset.is_empty() || !self.two_typo_subset.is_empty() {
|
||||
self.original.compute_fully_if_needed(ctx)?;
|
||||
}
|
||||
@ -297,6 +300,7 @@ impl QueryTermSubset {
|
||||
let mut result = BTreeSet::default();
|
||||
|
||||
if !self.one_typo_subset.is_empty() {
|
||||
// TODO: compute less than fully if possible
|
||||
self.original.compute_fully_if_needed(ctx)?;
|
||||
}
|
||||
let original = ctx.term_interner.get_mut(self.original);
|
||||
|
@ -77,9 +77,13 @@ pub fn located_query_terms_from_tokens(
|
||||
}
|
||||
}
|
||||
TokenKind::Separator(separator_kind) => {
|
||||
// add penalty for hard separators
|
||||
if let SeparatorKind::Hard = separator_kind {
|
||||
position = position.wrapping_add(7);
|
||||
match separator_kind {
|
||||
SeparatorKind::Hard => {
|
||||
position += 1;
|
||||
}
|
||||
SeparatorKind::Soft => {
|
||||
position += 0;
|
||||
}
|
||||
}
|
||||
|
||||
phrase = 'phrase: {
|
||||
@ -139,6 +143,7 @@ pub fn number_of_typos_allowed<'ctx>(
|
||||
let min_len_one_typo = ctx.index.min_word_len_one_typo(ctx.txn)?;
|
||||
let min_len_two_typos = ctx.index.min_word_len_two_typos(ctx.txn)?;
|
||||
|
||||
// TODO: should `exact_words` also disable prefix search, ngrams, split words, or synonyms?
|
||||
let exact_words = ctx.index.exact_words(ctx.txn)?;
|
||||
|
||||
Ok(Box::new(move |word: &str| {
|
||||
@ -249,6 +254,8 @@ impl PhraseBuilder {
|
||||
} else {
|
||||
// token has kind Word
|
||||
let word = ctx.word_interner.insert(token.lemma().to_string());
|
||||
// TODO: in a phrase, check that every word exists
|
||||
// otherwise return an empty term
|
||||
self.words.push(Some(word));
|
||||
}
|
||||
}
|
||||
@ -281,36 +288,3 @@ impl PhraseBuilder {
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use charabia::TokenizerBuilder;
|
||||
|
||||
use super::*;
|
||||
use crate::index::tests::TempIndex;
|
||||
|
||||
fn temp_index_with_documents() -> TempIndex {
|
||||
let temp_index = TempIndex::new();
|
||||
temp_index
|
||||
.add_documents(documents!([
|
||||
{ "id": 1, "name": "split this world westfali westfalia the Ŵôřlḑôle" },
|
||||
{ "id": 2, "name": "Westfália" },
|
||||
{ "id": 3, "name": "Ŵôřlḑôle" },
|
||||
]))
|
||||
.unwrap();
|
||||
temp_index
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn start_with_hard_separator() -> Result<()> {
|
||||
let tokenizer = TokenizerBuilder::new().build();
|
||||
let tokens = tokenizer.tokenize(".");
|
||||
let index = temp_index_with_documents();
|
||||
let rtxn = index.read_txn()?;
|
||||
let mut ctx = SearchContext::new(&index, &rtxn);
|
||||
// panics with `attempt to add with overflow` before <https://github.com/meilisearch/meilisearch/issues/3785>
|
||||
let located_query_terms = located_query_terms_from_tokens(&mut ctx, tokens, None)?;
|
||||
assert!(located_query_terms.is_empty());
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
@ -1,48 +1,5 @@
|
||||
/** Implements a "PathVisitor" which finds all paths of a certain cost
|
||||
from the START to END node of a ranking rule graph.
|
||||
#![allow(clippy::too_many_arguments)]
|
||||
|
||||
A path is a list of conditions. A condition is the data associated with
|
||||
an edge, given by the ranking rule. Some edges don't have a condition associated
|
||||
with them, they are "unconditional". These kinds of edges are used to "skip" a node.
|
||||
|
||||
The algorithm uses a depth-first search. It benefits from two main optimisations:
|
||||
- The list of all possible costs to go from any node to the END node is precomputed
|
||||
- The `DeadEndsCache` reduces the number of valid paths drastically, by making some edges
|
||||
untraversable depending on what other edges were selected.
|
||||
|
||||
These two optimisations are meant to avoid traversing edges that wouldn't lead
|
||||
to a valid path. In practically all cases, we avoid the exponential complexity
|
||||
that is inherent to depth-first search in a large ranking rule graph.
|
||||
|
||||
The DeadEndsCache is a sort of prefix tree which associates a list of forbidden
|
||||
conditions to a list of traversed conditions.
|
||||
For example, the DeadEndsCache could say the following:
|
||||
- Immediately, from the start, the conditions `[a,b]` are forbidden
|
||||
- if we take the condition `c`, then the conditions `[e]` are also forbidden
|
||||
- and if after that, we take `f`, then `[h,i]` are also forbidden
|
||||
- etc.
|
||||
- if we take `g`, then `[f]` is also forbidden
|
||||
- etc.
|
||||
- etc.
|
||||
As we traverse the graph, we also traverse the `DeadEndsCache` and keep a list of forbidden
|
||||
conditions in memory. Then, we know to avoid all edges which have a condition that is forbidden.
|
||||
|
||||
When a path is found from START to END, we give it to the `visit` closure.
|
||||
This closure takes a mutable reference to the `DeadEndsCache`. This means that
|
||||
the caller can update this cache. Therefore, we must handle the case where the
|
||||
DeadEndsCache has been updated. This means potentially backtracking up to the point
|
||||
where the traversed conditions are all allowed by the new DeadEndsCache.
|
||||
|
||||
The algorithm also implements the `TermsMatchingStrategy` logic.
|
||||
Some edges are augmented with a list of "nodes_to_skip". Skipping
|
||||
a node means "reaching this node through an unconditional edge". If we have
|
||||
already traversed (ie. not skipped) a node that is in this list, then we know that we
|
||||
can't traverse this edge. Otherwise, we traverse the edge but make sure to skip any
|
||||
future node that was present in the "nodes_to_skip" list.
|
||||
|
||||
The caller can decide to stop the path finding algorithm
|
||||
by returning a `ControlFlow::Break` from the `visit` closure.
|
||||
*/
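The block comment removed above describes the path visitor: a depth-first walk over conditional and unconditional edges, where a `visit` closure receives each complete path and may stop the search by returning `ControlFlow::Break`. Purely as an illustration of that control shape, here is a toy version with made-up types, no costs and no `DeadEndsCache`; it is not the actual Meilisearch implementation:

```rust
use std::ops::ControlFlow;

/// Toy graph: adjacency list where each edge carries an optional "condition" label.
struct Graph {
    edges: Vec<Vec<(usize, Option<char>)>>, // node -> (destination, condition)
}

/// Depth-first enumeration of condition paths from `node` to `end`,
/// handing each complete path to `visit`, which may stop the search early.
fn visit_paths(
    graph: &Graph,
    node: usize,
    end: usize,
    path: &mut Vec<char>,
    visit: &mut dyn FnMut(&[char]) -> ControlFlow<()>,
) -> ControlFlow<()> {
    if node == end {
        return visit(path);
    }
    for &(dest, condition) in &graph.edges[node] {
        let pushed = if let Some(c) = condition {
            path.push(c); // conditional edge: record its condition
            true
        } else {
            false // unconditional edge: nothing recorded, the node is "skipped"
        };
        let flow = visit_paths(graph, dest, end, path, visit);
        if pushed {
            path.pop();
        }
        if flow.is_break() {
            return ControlFlow::Break(());
        }
    }
    ControlFlow::Continue(())
}

fn main() {
    // START = 0, END = 3
    let graph = Graph {
        edges: vec![
            vec![(1, Some('a')), (2, None)],
            vec![(3, Some('b'))],
            vec![(3, Some('c'))],
            vec![],
        ],
    };
    let mut found = Vec::new();
    let _ = visit_paths(&graph, 0, 3, &mut Vec::new(), &mut |path: &[char]| {
        found.push(path.to_vec());
        ControlFlow::Continue(())
    });
    assert_eq!(found, vec![vec!['a', 'b'], vec!['c']]);
}
```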
|
||||
use std::collections::{BTreeSet, VecDeque};
|
||||
use std::iter::FromIterator;
|
||||
use std::ops::ControlFlow;
|
||||
@ -55,41 +12,30 @@ use crate::search::new::query_graph::QueryNode;
|
||||
use crate::search::new::small_bitmap::SmallBitmap;
|
||||
use crate::Result;
|
||||
|
||||
/// Closure which processes a path found by the `PathVisitor`
|
||||
type VisitFn<'f, G> = &'f mut dyn FnMut(
|
||||
// the path as a list of conditions
|
||||
&[Interned<<G as RankingRuleGraphTrait>::Condition>],
|
||||
&mut RankingRuleGraph<G>,
|
||||
// a mutable reference to the DeadEndsCache, to update it in case the given
|
||||
// path doesn't resolve to any valid document ids
|
||||
&mut DeadEndsCache<<G as RankingRuleGraphTrait>::Condition>,
|
||||
) -> Result<ControlFlow<()>>;

/// A structure which is kept but not updated during the traversal of the graph.
/// It can however be updated by the `visit` closure once a valid path has been found.
struct VisitorContext<'a, G: RankingRuleGraphTrait> {
    graph: &'a mut RankingRuleGraph<G>,
    all_costs_from_node: &'a MappedInterner<QueryNode, Vec<u64>>,
    dead_ends_cache: &'a mut DeadEndsCache<G::Condition>,
}

/// The internal state of the traversal algorithm
struct VisitorState<G: RankingRuleGraphTrait> {
    /// Budget from the current node to the end node
    remaining_cost: u64,
    /// Previously visited conditions, in order.
    path: Vec<Interned<G::Condition>>,
    /// Previously visited conditions, as an efficient and compact set.
    visited_conditions: SmallBitmap<G::Condition>,
    /// Previously visited (ie not skipped) nodes, as an efficient and compact set.
    visited_nodes: SmallBitmap<QueryNode>,
    /// The conditions that cannot be visited anymore
    forbidden_conditions: SmallBitmap<G::Condition>,
    /// The nodes that cannot be visited anymore (they must be skipped)
    nodes_to_skip: SmallBitmap<QueryNode>,
    forbidden_conditions_to_nodes: SmallBitmap<QueryNode>,
}

/// See module documentation
pub struct PathVisitor<'a, G: RankingRuleGraphTrait> {
    state: VisitorState<G>,
    ctx: VisitorContext<'a, G>,
|
||||
@ -110,13 +56,14 @@ impl<'a, G: RankingRuleGraphTrait> PathVisitor<'a, G> {
|
||||
forbidden_conditions: SmallBitmap::for_interned_values_in(
|
||||
&graph.conditions_interner,
|
||||
),
|
||||
nodes_to_skip: SmallBitmap::for_interned_values_in(&graph.query_graph.nodes),
|
||||
forbidden_conditions_to_nodes: SmallBitmap::for_interned_values_in(
|
||||
&graph.query_graph.nodes,
|
||||
),
|
||||
},
|
||||
ctx: VisitorContext { graph, all_costs_from_node, dead_ends_cache },
|
||||
}
|
||||
}
|
||||
|
||||
/// See module documentation
|
||||
pub fn visit_paths(mut self, visit: VisitFn<G>) -> Result<()> {
|
||||
let _ =
|
||||
self.state.visit_node(self.ctx.graph.query_graph.root_node, visit, &mut self.ctx)?;
|
||||
@ -125,31 +72,22 @@ impl<'a, G: RankingRuleGraphTrait> PathVisitor<'a, G> {
|
||||
}
|
||||
|
||||
impl<G: RankingRuleGraphTrait> VisitorState<G> {
|
||||
/// Visits a node: traverse all its valid conditional and unconditional edges.
|
||||
///
|
||||
/// Returns ControlFlow::Break if the path finding algorithm should stop.
|
||||
/// Returns whether a valid path was found from this node otherwise.
|
||||
fn visit_node(
|
||||
&mut self,
|
||||
from_node: Interned<QueryNode>,
|
||||
visit: VisitFn<G>,
|
||||
ctx: &mut VisitorContext<G>,
|
||||
) -> Result<ControlFlow<(), bool>> {
|
||||
// any valid path will be found from this point
|
||||
// if a valid path was found, then we know that the DeadEndsCache may have been updated,
|
||||
// and we will need to do more work to potentially backtrack
|
||||
let mut any_valid = false;
|
||||
|
||||
let edges = ctx.graph.edges_of_node.get(from_node).clone();
|
||||
for edge_idx in edges.iter() {
|
||||
// could be none if the edge was deleted
|
||||
let Some(edge) = ctx.graph.edges_store.get(edge_idx).clone() else { continue };
|
||||
|
||||
if self.remaining_cost < edge.cost as u64 {
|
||||
continue;
|
||||
}
|
||||
self.remaining_cost -= edge.cost as u64;
|
||||
|
||||
let cf = match edge.condition {
|
||||
Some(condition) => self.visit_condition(
|
||||
condition,
|
||||
@ -181,10 +119,6 @@ impl<G: RankingRuleGraphTrait> VisitorState<G> {
|
||||
Ok(ControlFlow::Continue(any_valid))
|
||||
}
|
||||
|
||||
/// Visits an unconditional edge.
|
||||
///
|
||||
/// Returns ControlFlow::Break if the path finding algorithm should stop.
|
||||
/// Returns whether a valid path was found from this node otherwise.
|
||||
fn visit_no_condition(
|
||||
&mut self,
|
||||
dest_node: Interned<QueryNode>,
|
||||
@ -200,29 +134,20 @@ impl<G: RankingRuleGraphTrait> VisitorState<G> {
|
||||
{
|
||||
return Ok(ControlFlow::Continue(false));
|
||||
}
|
||||
// We've reached the END node!
|
||||
if dest_node == ctx.graph.query_graph.end_node {
|
||||
let control_flow = visit(&self.path, ctx.graph, ctx.dead_ends_cache)?;
|
||||
// We could change the return type of the visit closure such that the caller
|
||||
// tells us whether the dead ends cache was updated or not.
|
||||
// Alternatively, maybe the DeadEndsCache should have a generation number
|
||||
// to it, so that we don't need to play with these booleans at all.
|
||||
match control_flow {
|
||||
ControlFlow::Continue(_) => Ok(ControlFlow::Continue(true)),
|
||||
ControlFlow::Break(_) => Ok(ControlFlow::Break(())),
|
||||
}
|
||||
} else {
|
||||
let old_fbct = self.nodes_to_skip.clone();
|
||||
self.nodes_to_skip.union(edge_new_nodes_to_skip);
|
||||
let old_fbct = self.forbidden_conditions_to_nodes.clone();
|
||||
self.forbidden_conditions_to_nodes.union(edge_new_nodes_to_skip);
|
||||
let cf = self.visit_node(dest_node, visit, ctx)?;
|
||||
self.nodes_to_skip = old_fbct;
|
||||
self.forbidden_conditions_to_nodes = old_fbct;
|
||||
Ok(cf)
|
||||
}
|
||||
}
|
||||
/// Visits a conditional edge.
|
||||
///
|
||||
/// Returns ControlFlow::Break if the path finding algorithm should stop.
|
||||
/// Returns whether a valid path was found from this node otherwise.
|
||||
fn visit_condition(
|
||||
&mut self,
|
||||
condition: Interned<G::Condition>,
|
||||
@ -234,7 +159,7 @@ impl<G: RankingRuleGraphTrait> VisitorState<G> {
|
||||
assert!(dest_node != ctx.graph.query_graph.end_node);
|
||||
|
||||
if self.forbidden_conditions.contains(condition)
|
||||
|| self.nodes_to_skip.contains(dest_node)
|
||||
|| self.forbidden_conditions_to_nodes.contains(dest_node)
|
||||
|| edge_new_nodes_to_skip.intersects(&self.visited_nodes)
|
||||
{
|
||||
return Ok(ControlFlow::Continue(false));
|
||||
@ -255,19 +180,19 @@ impl<G: RankingRuleGraphTrait> VisitorState<G> {
|
||||
self.visited_nodes.insert(dest_node);
|
||||
self.visited_conditions.insert(condition);
|
||||
|
||||
let old_forb_cond = self.forbidden_conditions.clone();
|
||||
let old_fc = self.forbidden_conditions.clone();
|
||||
if let Some(next_forbidden) =
|
||||
ctx.dead_ends_cache.forbidden_conditions_after_prefix(self.path.iter().copied())
|
||||
{
|
||||
self.forbidden_conditions.union(&next_forbidden);
|
||||
}
|
||||
let old_nodes_to_skip = self.nodes_to_skip.clone();
|
||||
self.nodes_to_skip.union(edge_new_nodes_to_skip);
|
||||
let old_fctn = self.forbidden_conditions_to_nodes.clone();
|
||||
self.forbidden_conditions_to_nodes.union(edge_new_nodes_to_skip);
|
||||
|
||||
let cf = self.visit_node(dest_node, visit, ctx)?;
|
||||
|
||||
self.nodes_to_skip = old_nodes_to_skip;
|
||||
self.forbidden_conditions = old_forb_cond;
|
||||
self.forbidden_conditions_to_nodes = old_fctn;
|
||||
self.forbidden_conditions = old_fc;
|
||||
|
||||
self.visited_conditions.remove(condition);
|
||||
self.visited_nodes.remove(dest_node);
|
||||
|
@ -9,8 +9,12 @@ use crate::search::new::query_term::LocatedQueryTermSubset;
use crate::search::new::SearchContext;
use crate::Result;

// TODO: give a generation to each universe, then be able to get the exact
// delta of docids between two universes of different generations!

/// A cache storing the document ids associated with each ranking rule edge
pub struct ConditionDocIdsCache<G: RankingRuleGraphTrait> {
    // TOOD: should be a mapped interner?
    pub cache: FxHashMap<Interned<G::Condition>, ComputedCondition>,
    _phantom: PhantomData<G>,
}
@ -50,7 +54,7 @@ impl<G: RankingRuleGraphTrait> ConditionDocIdsCache<G> {
        }
        let condition = graph.conditions_interner.get_mut(interned_condition);
        let computed = G::resolve_condition(ctx, condition, universe)?;
        // Can we put an assert here for computed.universe_len == universe.len() ?
        // TODO: if computed.universe_len != universe.len() ?
        let _ = self.cache.insert(interned_condition, computed);
        let computed = &self.cache[&interned_condition];
        Ok(computed)
|
||||
|
@ -2,7 +2,6 @@ use crate::search::new::interner::{FixedSizeInterner, Interned};
use crate::search::new::small_bitmap::SmallBitmap;

pub struct DeadEndsCache<T> {
    // conditions and next could/should be part of the same vector
    conditions: Vec<Interned<T>>,
    next: Vec<Self>,
    pub forbidden: SmallBitmap<T>,
@ -28,7 +27,7 @@ impl<T> DeadEndsCache<T> {
        self.forbidden.insert(condition);
    }

    fn advance(&mut self, condition: Interned<T>) -> Option<&mut Self> {
    pub fn advance(&mut self, condition: Interned<T>) -> Option<&mut Self> {
        if let Some(idx) = self.conditions.iter().position(|c| *c == condition) {
            Some(&mut self.next[idx])
        } else {
|
||||
|
@ -69,9 +69,14 @@ impl RankingRuleGraphTrait for FidGraph {
|
||||
|
||||
let mut edges = vec![];
|
||||
for fid in all_fields {
|
||||
// TODO: We can improve performances and relevancy by storing
|
||||
// the term subsets associated to each field ids fetched.
|
||||
edges.push((
|
||||
fid as u32 * term.term_ids.len() as u32,
|
||||
conditions_interner.insert(FidCondition { term: term.clone(), fid }),
|
||||
fid as u32 * term.term_ids.len() as u32, // TODO improve the fid score i.e. fid^10.
|
||||
conditions_interner.insert(FidCondition {
|
||||
term: term.clone(), // TODO remove this ugly clone
|
||||
fid,
|
||||
}),
|
||||
));
|
||||
}
|
||||
|
||||
|
@ -94,9 +94,14 @@ impl RankingRuleGraphTrait for PositionGraph {
|
||||
let mut edges = vec![];
|
||||
|
||||
for (cost, positions) in positions_for_costs {
|
||||
// TODO: We can improve performances and relevancy by storing
|
||||
// the term subsets associated to each position fetched
|
||||
edges.push((
|
||||
cost,
|
||||
conditions_interner.insert(PositionCondition { term: term.clone(), positions }),
|
||||
conditions_interner.insert(PositionCondition {
|
||||
term: term.clone(), // TODO remove this ugly clone
|
||||
positions,
|
||||
}),
|
||||
));
|
||||
}
|
||||
|
||||
|
@ -65,6 +65,13 @@ pub fn compute_docids(
|
||||
}
|
||||
}
|
||||
|
||||
// TODO: add safeguard in case the cartesian product is too large!
|
||||
// even if we restrict the word derivations to a maximum of 100, the size of the
|
||||
// caterisan product could reach a maximum of 10_000 derivations, which is way too much.
|
||||
// Maybe prioritise the product of zero typo derivations, then the product of zero-typo/one-typo
|
||||
// + one-typo/zero-typo, then one-typo/one-typo, then ... until an arbitrary limit has been
|
||||
// reached
|
||||
|
||||
for (left_phrase, left_word) in last_words_of_term_derivations(ctx, &left_term.term_subset)? {
|
||||
// Before computing the edges, check that the left word and left phrase
|
||||
// aren't disjoint with the universe, but only do it if there is more than
|
||||
@ -104,6 +111,8 @@ pub fn compute_docids(
|
||||
Ok(ComputedCondition {
|
||||
docids,
|
||||
universe_len: universe.len(),
|
||||
// TODO: think about whether we want to reduce the subset,
|
||||
// we probably should!
|
||||
start_term_subset: Some(left_term.clone()),
|
||||
end_term_subset: right_term.clone(),
|
||||
})
|
||||
@ -194,7 +203,12 @@ fn compute_non_prefix_edges(
|
||||
*docids |= new_docids;
|
||||
}
|
||||
}
|
||||
if backward_proximity >= 1 && left_phrase.is_none() && right_phrase.is_none() {
|
||||
if backward_proximity >= 1
|
||||
// TODO: for now, we don't do any swapping when either term is a phrase
|
||||
// but maybe we should. We'd need to look at the first/last word of the phrase
|
||||
// depending on the context.
|
||||
&& left_phrase.is_none() && right_phrase.is_none()
|
||||
{
|
||||
if let Some(new_docids) =
|
||||
ctx.get_db_word_pair_proximity_docids(word2, word1, backward_proximity)?
|
||||
{
|
||||
|
@ -33,6 +33,8 @@ pub fn compute_query_term_subset_docids(
|
||||
ctx: &mut SearchContext,
|
||||
term: &QueryTermSubset,
|
||||
) -> Result<RoaringBitmap> {
|
||||
// TODO Use the roaring::MultiOps trait
|
||||
|
||||
let mut docids = RoaringBitmap::new();
|
||||
for word in term.all_single_words_except_prefix_db(ctx)? {
|
||||
if let Some(word_docids) = ctx.word_docids(word)? {
|
||||
@ -57,6 +59,8 @@ pub fn compute_query_term_subset_docids_within_field_id(
|
||||
term: &QueryTermSubset,
|
||||
fid: u16,
|
||||
) -> Result<RoaringBitmap> {
|
||||
// TODO Use the roaring::MultiOps trait
|
||||
|
||||
let mut docids = RoaringBitmap::new();
|
||||
for word in term.all_single_words_except_prefix_db(ctx)? {
|
||||
if let Some(word_fid_docids) = ctx.get_db_word_fid_docids(word.interned(), fid)? {
|
||||
@ -67,6 +71,7 @@ pub fn compute_query_term_subset_docids_within_field_id(
|
||||
for phrase in term.all_phrases(ctx)? {
|
||||
// There may be false positives when resolving a phrase, so we're not
|
||||
// guaranteed that all of its words are within a single fid.
|
||||
// TODO: fix this?
|
||||
if let Some(word) = phrase.words(ctx).iter().flatten().next() {
|
||||
if let Some(word_fid_docids) = ctx.get_db_word_fid_docids(*word, fid)? {
|
||||
docids |= ctx.get_phrase_docids(phrase)? & word_fid_docids;
|
||||
@ -90,6 +95,7 @@ pub fn compute_query_term_subset_docids_within_position(
|
||||
term: &QueryTermSubset,
|
||||
position: u16,
|
||||
) -> Result<RoaringBitmap> {
|
||||
// TODO Use the roaring::MultiOps trait
|
||||
let mut docids = RoaringBitmap::new();
|
||||
for word in term.all_single_words_except_prefix_db(ctx)? {
|
||||
if let Some(word_position_docids) =
|
||||
@ -102,6 +108,7 @@ pub fn compute_query_term_subset_docids_within_position(
|
||||
for phrase in term.all_phrases(ctx)? {
|
||||
// It's difficult to know the expected position of the words in the phrase,
|
||||
// so instead we just check the first one.
|
||||
// TODO: fix this?
|
||||
if let Some(word) = phrase.words(ctx).iter().flatten().next() {
|
||||
if let Some(word_position_docids) = ctx.get_db_word_position_docids(*word, position)? {
|
||||
docids |= ctx.get_phrase_docids(phrase)? & word_position_docids
|
||||
@ -125,6 +132,9 @@ pub fn compute_query_graph_docids(
|
||||
q: &QueryGraph,
|
||||
universe: &RoaringBitmap,
|
||||
) -> Result<RoaringBitmap> {
|
||||
// TODO: there must be a faster way to compute this big
|
||||
// roaring bitmap expression
|
||||
|
||||
let mut nodes_resolved = SmallBitmap::for_interned_values_in(&q.nodes);
|
||||
let mut path_nodes_docids = q.nodes.map(|_| RoaringBitmap::new());
|
||||
|
||||
|
@ -141,6 +141,10 @@ impl<'ctx, Query: RankingRuleQueryTrait> RankingRule<'ctx, Query> for Sort<'ctx,
|
||||
universe: &RoaringBitmap,
|
||||
) -> Result<Option<RankingRuleOutput<Query>>> {
|
||||
let iter = self.iter.as_mut().unwrap();
|
||||
// TODO: we should make use of the universe in the function below
|
||||
// good for correctness, but ideally iter.next_bucket would take the current universe into account,
|
||||
// as right now it could return buckets that don't intersect with the universe, meaning we will make many
|
||||
// unneeded calls.
|
||||
if let Some(mut bucket) = iter.next_bucket()? {
|
||||
bucket.candidates &= universe;
|
||||
Ok(Some(bucket))
|
||||
|
@ -527,7 +527,7 @@ fn test_distinct_all_candidates() {
|
||||
let SearchResult { documents_ids, candidates, .. } = s.execute().unwrap();
|
||||
let candidates = candidates.iter().collect::<Vec<_>>();
|
||||
insta::assert_snapshot!(format!("{documents_ids:?}"), @"[14, 26, 4, 7, 17, 23, 1, 19, 25, 8, 20, 24]");
|
||||
// This is incorrect, but unfortunately impossible to do better efficiently.
|
||||
// TODO: this is incorrect!
|
||||
insta::assert_snapshot!(format!("{candidates:?}"), @"[1, 4, 7, 8, 14, 17, 19, 20, 23, 24, 25, 26]");
|
||||
}
|
||||
|
||||
|
@ -122,11 +122,11 @@ fn create_edge_cases_index() -> TempIndex {
|
||||
sta stb stc ste stf stg sth sti stj stk stl stm stn sto stp stq str stst stt stu stv stw stx sty stz
|
||||
"
|
||||
},
|
||||
// The next 5 documents lay out a trap with the split word, phrase search, or synonym `sun flower`.
|
||||
// If the search query is "sunflower", the split word "Sun Flower" will match some documents.
|
||||
// The next 5 documents lay out a trap with the split word, phrase search, or synonym `sun flower`.
|
||||
// If the search query is "sunflower", the split word "Sun Flower" will match some documents.
|
||||
// If the query is `sunflower wilting`, then we should make sure that
|
||||
// the proximity condition `flower wilting: sprx N` also comes with the condition
|
||||
// `sun wilting: sprx N+1`, but this is not the exact condition we use for now.
|
||||
// the sprximity condition `flower wilting: sprx N` also comes with the condition
|
||||
// `sun wilting: sprx N+1`. TODO: this is not the exact condition we use for now.
|
||||
// We only check that the phrase `sun flower` exists and `flower wilting: sprx N`, which
|
||||
// is better than nothing but not the best.
|
||||
{
|
||||
@ -139,7 +139,7 @@ fn create_edge_cases_index() -> TempIndex {
|
||||
},
|
||||
{
|
||||
"id": 3,
|
||||
// This document matches the query `sunflower wilting`, but the sprximity condition
|
||||
// This document matches the query `sunflower wilting`, but the sprximity condition
|
||||
// between `sunflower` and `wilting` cannot be through the split-word `Sun Flower`
|
||||
// which would reduce to only `flower` and `wilting` being in sprximity.
|
||||
"text": "A flower wilting under the sun, unlike a sunflower"
|
||||
@ -299,7 +299,7 @@ fn test_proximity_split_word() {
|
||||
let SearchResult { documents_ids, .. } = s.execute().unwrap();
|
||||
insta::assert_snapshot!(format!("{documents_ids:?}"), @"[2, 4, 5, 1, 3]");
|
||||
let texts = collect_field_values(&index, &txn, "text", &documents_ids);
|
||||
// "2" and "4" should be swapped ideally
|
||||
// TODO: "2" and "4" should be swapped ideally
|
||||
insta::assert_debug_snapshot!(texts, @r###"
|
||||
[
|
||||
"\"Sun Flower sounds like the title of a painting, maybe about a flower wilting under the heat.\"",
|
||||
@ -316,7 +316,7 @@ fn test_proximity_split_word() {
|
||||
let SearchResult { documents_ids, .. } = s.execute().unwrap();
|
||||
insta::assert_snapshot!(format!("{documents_ids:?}"), @"[2, 4, 1]");
|
||||
let texts = collect_field_values(&index, &txn, "text", &documents_ids);
|
||||
// "2" and "4" should be swapped ideally
|
||||
// TODO: "2" and "4" should be swapped ideally
|
||||
insta::assert_debug_snapshot!(texts, @r###"
|
||||
[
|
||||
"\"Sun Flower sounds like the title of a painting, maybe about a flower wilting under the heat.\"",
|
||||
@ -341,7 +341,7 @@ fn test_proximity_split_word() {
|
||||
let SearchResult { documents_ids, .. } = s.execute().unwrap();
|
||||
insta::assert_snapshot!(format!("{documents_ids:?}"), @"[2, 4, 1]");
|
||||
let texts = collect_field_values(&index, &txn, "text", &documents_ids);
|
||||
// "2" and "4" should be swapped ideally
|
||||
// TODO: "2" and "4" should be swapped ideally
|
||||
insta::assert_debug_snapshot!(texts, @r###"
|
||||
[
|
||||
"\"Sun Flower sounds like the title of a painting, maybe about a flower wilting under the heat.\"",
|
||||
|
@ -2,8 +2,9 @@
This module tests the interactions between the proximity and typo ranking rules.

The proximity ranking rule should transform the query graph such that it
only contains the word pairs that it used to compute its bucket, but this is not currently
implemented.
only contains the word pairs that it used to compute its bucket.

TODO: This is not currently implemented.
*/
|
||||
|
||||
use crate::index::tests::TempIndex;
|
||||
@ -63,7 +64,7 @@ fn test_trap_basic() {
|
||||
let SearchResult { documents_ids, .. } = s.execute().unwrap();
|
||||
insta::assert_snapshot!(format!("{documents_ids:?}"), @"[0, 1]");
|
||||
let texts = collect_field_values(&index, &txn, "text", &documents_ids);
|
||||
// This is incorrect, 1 should come before 0
|
||||
// TODO: this is incorrect, 1 should come before 0
|
||||
insta::assert_debug_snapshot!(texts, @r###"
|
||||
[
|
||||
"\"summer. holiday. sommer holidty\"",
|
||||
|
@ -571,8 +571,8 @@ fn test_typo_synonyms() {
|
||||
s.terms_matching_strategy(TermsMatchingStrategy::All);
|
||||
s.query("the fast brownish fox jumps over the lackadaisical dog");
|
||||
|
||||
// The interaction of ngrams + synonyms means that the multi-word synonyms end up having a typo cost.
|
||||
// This is probably not what we want.
|
||||
// TODO: is this correct? interaction of ngrams + synonyms means that the
|
||||
// multi-word synonyms end up having a typo cost. This is probably not what we want.
|
||||
let SearchResult { documents_ids, .. } = s.execute().unwrap();
|
||||
insta::assert_snapshot!(format!("{documents_ids:?}"), @"[21, 0, 22]");
|
||||
let texts = collect_field_values(&index, &txn, "text", &documents_ids);
|
||||
|
@ -89,6 +89,7 @@ Create a snapshot test of the given database.
|
||||
- `exact_word_docids`
|
||||
- `word_prefix_docids`
|
||||
- `exact_word_prefix_docids`
|
||||
- `docid_word_positions`
|
||||
- `word_pair_proximity_docids`
|
||||
- `word_prefix_pair_proximity_docids`
|
||||
- `word_position_docids`
|
||||
@ -216,6 +217,11 @@ pub fn snap_exact_word_prefix_docids(index: &Index) -> String {
|
||||
&format!("{s:<16} {}", display_bitmap(&b))
|
||||
})
|
||||
}
|
||||
pub fn snap_docid_word_positions(index: &Index) -> String {
|
||||
make_db_snap_from_iter!(index, docid_word_positions, |((idx, s), b)| {
|
||||
&format!("{idx:<6} {s:<16} {}", display_bitmap(&b))
|
||||
})
|
||||
}
|
||||
pub fn snap_word_pair_proximity_docids(index: &Index) -> String {
|
||||
make_db_snap_from_iter!(index, word_pair_proximity_docids, |((proximity, word1, word2), b)| {
|
||||
&format!("{proximity:<2} {word1:<16} {word2:<16} {}", display_bitmap(&b))
|
||||
@ -318,7 +324,7 @@ pub fn snap_field_distributions(index: &Index) -> String {
|
||||
let rtxn = index.read_txn().unwrap();
|
||||
let mut snap = String::new();
|
||||
for (field, count) in index.field_distribution(&rtxn).unwrap() {
|
||||
writeln!(&mut snap, "{field:<16} {count:<6} |").unwrap();
|
||||
writeln!(&mut snap, "{field:<16} {count:<6}").unwrap();
|
||||
}
|
||||
snap
|
||||
}
|
||||
@ -328,7 +334,7 @@ pub fn snap_fields_ids_map(index: &Index) -> String {
|
||||
let mut snap = String::new();
|
||||
for field_id in fields_ids_map.ids() {
|
||||
let name = fields_ids_map.name(field_id).unwrap();
|
||||
writeln!(&mut snap, "{field_id:<3} {name:<16} |").unwrap();
|
||||
writeln!(&mut snap, "{field_id:<3} {name:<16}").unwrap();
|
||||
}
|
||||
snap
|
||||
}
|
||||
@ -471,6 +477,9 @@ macro_rules! full_snap_of_db {
|
||||
($index:ident, exact_word_prefix_docids) => {{
|
||||
$crate::snapshot_tests::snap_exact_word_prefix_docids(&$index)
|
||||
}};
|
||||
($index:ident, docid_word_positions) => {{
|
||||
$crate::snapshot_tests::snap_docid_word_positions(&$index)
|
||||
}};
|
||||
($index:ident, word_pair_proximity_docids) => {{
|
||||
$crate::snapshot_tests::snap_word_pair_proximity_docids(&$index)
|
||||
}};
|
||||
|
@ -1,7 +1,7 @@
|
||||
---
|
||||
source: milli/src/index.rs
|
||||
---
|
||||
age 1 |
|
||||
id 2 |
|
||||
name 2 |
|
||||
age 1
|
||||
id 2
|
||||
name 2
|
||||
|
||||
|
@ -1,7 +1,7 @@
|
||||
---
|
||||
source: milli/src/index.rs
|
||||
---
|
||||
age 1 |
|
||||
id 2 |
|
||||
name 2 |
|
||||
age 1
|
||||
id 2
|
||||
name 2
|
||||
|
||||
|
@ -23,6 +23,7 @@ impl<'t, 'u, 'i> ClearDocuments<'t, 'u, 'i> {
|
||||
exact_word_docids,
|
||||
word_prefix_docids,
|
||||
exact_word_prefix_docids,
|
||||
docid_word_positions,
|
||||
word_pair_proximity_docids,
|
||||
word_prefix_pair_proximity_docids,
|
||||
prefix_word_pair_proximity_docids,
|
||||
@ -79,6 +80,7 @@ impl<'t, 'u, 'i> ClearDocuments<'t, 'u, 'i> {
|
||||
exact_word_docids.clear(self.wtxn)?;
|
||||
word_prefix_docids.clear(self.wtxn)?;
|
||||
exact_word_prefix_docids.clear(self.wtxn)?;
|
||||
docid_word_positions.clear(self.wtxn)?;
|
||||
word_pair_proximity_docids.clear(self.wtxn)?;
|
||||
word_prefix_pair_proximity_docids.clear(self.wtxn)?;
|
||||
prefix_word_pair_proximity_docids.clear(self.wtxn)?;
|
||||
@ -139,6 +141,7 @@ mod tests {
|
||||
|
||||
assert!(index.word_docids.is_empty(&rtxn).unwrap());
|
||||
assert!(index.word_prefix_docids.is_empty(&rtxn).unwrap());
|
||||
assert!(index.docid_word_positions.is_empty(&rtxn).unwrap());
|
||||
assert!(index.word_pair_proximity_docids.is_empty(&rtxn).unwrap());
|
||||
assert!(index.field_id_word_count_docids.is_empty(&rtxn).unwrap());
|
||||
assert!(index.word_prefix_pair_proximity_docids.is_empty(&rtxn).unwrap());
|
||||
|
@ -1,5 +1,5 @@
|
||||
use std::collections::btree_map::Entry;
|
||||
use std::collections::{BTreeSet, HashMap, HashSet};
|
||||
use std::collections::{HashMap, HashSet};
|
||||
|
||||
use fst::IntoStreamer;
|
||||
use heed::types::{ByteSlice, DecodeIgnore, Str, UnalignedSlice};
|
||||
@ -15,7 +15,8 @@ use crate::facet::FacetType;
|
||||
use crate::heed_codec::facet::FieldDocIdFacetCodec;
|
||||
use crate::heed_codec::CboRoaringBitmapCodec;
|
||||
use crate::{
|
||||
ExternalDocumentsIds, FieldId, FieldIdMapMissingEntry, Index, Result, RoaringBitmapCodec, BEU32,
|
||||
ExternalDocumentsIds, FieldId, FieldIdMapMissingEntry, Index, Result, RoaringBitmapCodec,
|
||||
SmallString32, BEU32,
|
||||
};
|
||||
|
||||
pub struct DeleteDocuments<'t, 'u, 'i> {
|
||||
@ -71,6 +72,7 @@ impl std::fmt::Display for DeletionStrategy {
|
||||
pub(crate) struct DetailedDocumentDeletionResult {
|
||||
pub deleted_documents: u64,
|
||||
pub remaining_documents: u64,
|
||||
pub soft_deletion_used: bool,
|
||||
}
|
||||
|
||||
impl<'t, 'u, 'i> DeleteDocuments<'t, 'u, 'i> {
|
||||
@ -107,8 +109,11 @@ impl<'t, 'u, 'i> DeleteDocuments<'t, 'u, 'i> {
|
||||
Some(docid)
|
||||
}
|
||||
pub fn execute(self) -> Result<DocumentDeletionResult> {
|
||||
let DetailedDocumentDeletionResult { deleted_documents, remaining_documents } =
|
||||
self.execute_inner()?;
|
||||
let DetailedDocumentDeletionResult {
|
||||
deleted_documents,
|
||||
remaining_documents,
|
||||
soft_deletion_used: _,
|
||||
} = self.execute_inner()?;
|
||||
|
||||
Ok(DocumentDeletionResult { deleted_documents, remaining_documents })
|
||||
}
|
||||
@ -129,6 +134,7 @@ impl<'t, 'u, 'i> DeleteDocuments<'t, 'u, 'i> {
|
||||
return Ok(DetailedDocumentDeletionResult {
|
||||
deleted_documents: 0,
|
||||
remaining_documents: 0,
|
||||
soft_deletion_used: false,
|
||||
});
|
||||
}
|
||||
|
||||
@ -144,6 +150,7 @@ impl<'t, 'u, 'i> DeleteDocuments<'t, 'u, 'i> {
|
||||
return Ok(DetailedDocumentDeletionResult {
|
||||
deleted_documents: current_documents_ids_len,
|
||||
remaining_documents,
|
||||
soft_deletion_used: false,
|
||||
});
|
||||
}
|
||||
|
||||
@ -212,6 +219,7 @@ impl<'t, 'u, 'i> DeleteDocuments<'t, 'u, 'i> {
|
||||
return Ok(DetailedDocumentDeletionResult {
|
||||
deleted_documents: self.to_delete_docids.len(),
|
||||
remaining_documents: documents_ids.len(),
|
||||
soft_deletion_used: true,
|
||||
});
|
||||
}
|
||||
|
||||
@ -224,6 +232,7 @@ impl<'t, 'u, 'i> DeleteDocuments<'t, 'u, 'i> {
|
||||
exact_word_docids,
|
||||
word_prefix_docids,
|
||||
exact_word_prefix_docids,
|
||||
docid_word_positions,
|
||||
word_pair_proximity_docids,
|
||||
field_id_word_count_docids,
|
||||
word_prefix_pair_proximity_docids,
|
||||
@ -242,9 +251,23 @@ impl<'t, 'u, 'i> DeleteDocuments<'t, 'u, 'i> {
|
||||
facet_id_is_empty_docids,
|
||||
documents,
|
||||
} = self.index;
|
||||
// Remove from the documents database
|
||||
|
||||
// Retrieve the words contained in the documents.
|
||||
let mut words = Vec::new();
|
||||
for docid in &self.to_delete_docids {
|
||||
documents.delete(self.wtxn, &BEU32::new(docid))?;
|
||||
|
||||
// We iterate through the words positions of the document id, retrieve the word and delete the positions.
|
||||
// We create an iterator to be able to get the content and delete the key-value itself.
|
||||
// It's faster to acquire a cursor to get and delete, as we avoid traversing the LMDB B-Tree two times but only once.
|
||||
let mut iter = docid_word_positions.prefix_iter_mut(self.wtxn, &(docid, ""))?;
|
||||
while let Some(result) = iter.next() {
|
||||
let ((_docid, word), _positions) = result?;
|
||||
// This boolean will indicate if we must remove this word from the words FST.
|
||||
words.push((SmallString32::from(word), false));
|
||||
// safety: we don't keep references from inside the LMDB database.
|
||||
unsafe { iter.del_current()? };
|
||||
}
|
||||
}
|
||||
// We acquire the current external documents ids map...
|
||||
// Note that its soft-deleted document ids field will be equal to the `to_delete_docids`
|
||||
@ -255,27 +278,42 @@ impl<'t, 'u, 'i> DeleteDocuments<'t, 'u, 'i> {
|
||||
let new_external_documents_ids = new_external_documents_ids.into_static();
|
||||
self.index.put_external_documents_ids(self.wtxn, &new_external_documents_ids)?;
|
||||
|
||||
let mut words_to_keep = BTreeSet::default();
|
||||
let mut words_to_delete = BTreeSet::default();
|
||||
// Maybe we can improve the get performance of the words
|
||||
// if we sort the words first, keeping the LMDB pages in cache.
|
||||
words.sort_unstable();
|
||||
|
||||
// We iterate over the words and delete the documents ids
|
||||
// from the word docids database.
|
||||
remove_from_word_docids(
|
||||
self.wtxn,
|
||||
word_docids,
|
||||
&self.to_delete_docids,
|
||||
&mut words_to_keep,
|
||||
&mut words_to_delete,
|
||||
)?;
|
||||
remove_from_word_docids(
|
||||
self.wtxn,
|
||||
exact_word_docids,
|
||||
&self.to_delete_docids,
|
||||
&mut words_to_keep,
|
||||
&mut words_to_delete,
|
||||
)?;
|
||||
for (word, must_remove) in &mut words {
|
||||
remove_from_word_docids(
|
||||
self.wtxn,
|
||||
word_docids,
|
||||
word.as_str(),
|
||||
must_remove,
|
||||
&self.to_delete_docids,
|
||||
)?;
|
||||
|
||||
remove_from_word_docids(
|
||||
self.wtxn,
|
||||
exact_word_docids,
|
||||
word.as_str(),
|
||||
must_remove,
|
||||
&self.to_delete_docids,
|
||||
)?;
|
||||
}
|
||||
|
||||
// We construct an FST set that contains the words to delete from the words FST.
|
||||
let words_to_delete = fst::Set::from_iter(words_to_delete.difference(&words_to_keep))?;
|
||||
let words_to_delete =
|
||||
words.iter().filter_map(
|
||||
|(word, must_remove)| {
|
||||
if *must_remove {
|
||||
Some(word.as_str())
|
||||
} else {
|
||||
None
|
||||
}
|
||||
},
|
||||
);
|
||||
let words_to_delete = fst::Set::from_iter(words_to_delete)?;
|
||||
|
||||
let new_words_fst = {
|
||||
// We retrieve the current words FST from the database.
|
||||
@ -434,6 +472,7 @@ impl<'t, 'u, 'i> DeleteDocuments<'t, 'u, 'i> {
|
||||
Ok(DetailedDocumentDeletionResult {
|
||||
deleted_documents: self.to_delete_docids.len(),
|
||||
remaining_documents: documents_ids.len(),
|
||||
soft_deletion_used: false,
|
||||
})
|
||||
}
|
||||
|
||||
@ -493,24 +532,23 @@ fn remove_from_word_prefix_docids(
|
||||
fn remove_from_word_docids(
|
||||
txn: &mut heed::RwTxn,
|
||||
db: &heed::Database<Str, RoaringBitmapCodec>,
|
||||
word: &str,
|
||||
must_remove: &mut bool,
|
||||
to_remove: &RoaringBitmap,
|
||||
words_to_keep: &mut BTreeSet<String>,
|
||||
words_to_remove: &mut BTreeSet<String>,
|
||||
) -> Result<()> {
|
||||
// We create an iterator to be able to get the content and delete the word docids.
|
||||
// It's faster to acquire a cursor to get and delete or put, as we avoid traversing
|
||||
// the LMDB B-Tree two times but only once.
|
||||
let mut iter = db.iter_mut(txn)?;
|
||||
while let Some((key, mut docids)) = iter.next().transpose()? {
|
||||
let previous_len = docids.len();
|
||||
docids -= to_remove;
|
||||
if docids.is_empty() {
|
||||
// safety: we don't keep references from inside the LMDB database.
|
||||
unsafe { iter.del_current()? };
|
||||
words_to_remove.insert(key.to_owned());
|
||||
} else {
|
||||
words_to_keep.insert(key.to_owned());
|
||||
if docids.len() != previous_len {
|
||||
let mut iter = db.prefix_iter_mut(txn, word)?;
|
||||
if let Some((key, mut docids)) = iter.next().transpose()? {
|
||||
if key == word {
|
||||
let previous_len = docids.len();
|
||||
docids -= to_remove;
|
||||
if docids.is_empty() {
|
||||
// safety: we don't keep references from inside the LMDB database.
|
||||
unsafe { iter.del_current()? };
|
||||
*must_remove = true;
|
||||
} else if docids.len() != previous_len {
|
||||
let key = key.to_owned();
|
||||
// safety: we don't keep references from inside the LMDB database.
|
||||
unsafe { iter.put_current(&key, &docids)? };
|
||||
@ -589,7 +627,7 @@ mod tests {
|
||||
|
||||
use super::*;
|
||||
use crate::index::tests::TempIndex;
|
||||
use crate::{db_snap, Filter, Search};
|
||||
use crate::{db_snap, Filter};
|
||||
|
||||
fn delete_documents<'t>(
|
||||
wtxn: &mut RwTxn<'t, '_>,
|
||||
@ -1161,52 +1199,4 @@ mod tests {
|
||||
DeletionStrategy::AlwaysSoft,
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn delete_words_exact_attributes() {
|
||||
let index = TempIndex::new();
|
||||
|
||||
index
|
||||
.update_settings(|settings| {
|
||||
settings.set_primary_key(S("id"));
|
||||
settings.set_searchable_fields(vec![S("text"), S("exact")]);
|
||||
settings.set_exact_attributes(vec![S("exact")].into_iter().collect());
|
||||
})
|
||||
.unwrap();
|
||||
|
||||
index
|
||||
.add_documents(documents!([
|
||||
{ "id": 0, "text": "hello" },
|
||||
{ "id": 1, "exact": "hello"}
|
||||
]))
|
||||
.unwrap();
|
||||
db_snap!(index, word_docids, 1, @r###"
|
||||
hello [0, ]
|
||||
"###);
|
||||
db_snap!(index, exact_word_docids, 1, @r###"
|
||||
hello [1, ]
|
||||
"###);
|
||||
db_snap!(index, words_fst, 1, @"300000000000000001084cfcfc2ce1000000016000000090ea47f");
|
||||
|
||||
let mut wtxn = index.write_txn().unwrap();
|
||||
let deleted_internal_ids =
|
||||
delete_documents(&mut wtxn, &index, &["1"], DeletionStrategy::AlwaysHard);
|
||||
wtxn.commit().unwrap();
|
||||
|
||||
db_snap!(index, word_docids, 2, @r###"
|
||||
hello [0, ]
|
||||
"###);
|
||||
db_snap!(index, exact_word_docids, 2, @"");
|
||||
db_snap!(index, words_fst, 2, @"300000000000000001084cfcfc2ce1000000016000000090ea47f");
|
||||
|
||||
insta::assert_snapshot!(format!("{deleted_internal_ids:?}"), @"[1]");
|
||||
let txn = index.read_txn().unwrap();
|
||||
let words = index.words_fst(&txn).unwrap().into_stream().into_strs().unwrap();
|
||||
insta::assert_snapshot!(format!("{words:?}"), @r###"["hello"]"###);
|
||||
|
||||
let mut s = Search::new(&txn, &index);
|
||||
s.query("hello");
|
||||
let crate::SearchResult { documents_ids, .. } = s.execute().unwrap();
|
||||
insta::assert_snapshot!(format!("{documents_ids:?}"), @"[0]");
|
||||
}
|
||||
}
|
||||
|
@ -1,6 +1,6 @@
|
||||
use std::collections::HashMap;
|
||||
use std::fs::File;
|
||||
use std::io;
|
||||
use std::{cmp, io};
|
||||
|
||||
use grenad::Sorter;
|
||||
|
||||
@ -54,10 +54,11 @@ pub fn extract_fid_word_count_docids<R: io::Read + io::Seek>(
|
||||
}
|
||||
|
||||
for position in read_u32_ne_bytes(value) {
|
||||
let (field_id, _) = relative_from_absolute_position(position);
|
||||
let (field_id, position) = relative_from_absolute_position(position);
|
||||
let word_count = position as u32 + 1;
|
||||
|
||||
let value = document_fid_wordcount.entry(field_id as FieldId).or_insert(0);
|
||||
*value += 1;
|
||||
*value = cmp::max(*value, word_count);
|
||||
}
|
||||
}
|
||||
|
||||
@ -82,7 +83,7 @@ fn drain_document_fid_wordcount_into_sorter(
|
||||
let mut key_buffer = Vec::new();
|
||||
|
||||
for (fid, count) in document_fid_wordcount.drain() {
|
||||
if count <= 30 {
|
||||
if count <= 10 {
|
||||
key_buffer.clear();
|
||||
key_buffer.extend_from_slice(&fid.to_be_bytes());
|
||||
key_buffer.push(count as u8);
|
||||
|
@ -325,6 +325,8 @@ fn send_and_extract_flattened_documents_data(
|
||||
// send docid_word_positions_chunk to DB writer
|
||||
let docid_word_positions_chunk =
|
||||
unsafe { as_cloneable_grenad(&docid_word_positions_chunk)? };
|
||||
let _ = lmdb_writer_sx
|
||||
.send(Ok(TypedChunk::DocidWordPositions(docid_word_positions_chunk.clone())));
|
||||
|
||||
let _ =
|
||||
lmdb_writer_sx.send(Ok(TypedChunk::ScriptLanguageDocids(script_language_pair)));
|
||||
|
@ -2,7 +2,7 @@ use std::sync::Arc;
|
||||
|
||||
use memmap2::Mmap;
|
||||
|
||||
/// Wrapper around Mmap allowing to virtually clone grenad-chunks
|
||||
/// Wrapper around Mmap allowing to virtualy clone grenad-chunks
|
||||
/// in a parallel process like the indexing.
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct ClonableMmap {
|
||||
|
@ -4,6 +4,7 @@ use std::result::Result as StdResult;
|
||||
|
||||
use roaring::RoaringBitmap;
|
||||
|
||||
use super::read_u32_ne_bytes;
|
||||
use crate::heed_codec::CboRoaringBitmapCodec;
|
||||
use crate::update::index_documents::transform::Operation;
|
||||
use crate::Result;
|
||||
@ -21,6 +22,10 @@ pub fn concat_u32s_array<'a>(_key: &[u8], values: &[Cow<'a, [u8]>]) -> Result<Co
|
||||
}
|
||||
}
|
||||
|
||||
pub fn roaring_bitmap_from_u32s_array(slice: &[u8]) -> RoaringBitmap {
|
||||
read_u32_ne_bytes(slice).collect()
|
||||
}
|
||||
|
||||
pub fn serialize_roaring_bitmap(bitmap: &RoaringBitmap, buffer: &mut Vec<u8>) -> io::Result<()> {
|
||||
buffer.clear();
|
||||
buffer.reserve(bitmap.serialized_size());
|
||||
|
@ -14,8 +14,8 @@ pub use grenad_helpers::{
|
||||
};
|
||||
pub use merge_functions::{
|
||||
concat_u32s_array, keep_first, keep_latest_obkv, merge_cbo_roaring_bitmaps,
|
||||
merge_obkvs_and_operations, merge_roaring_bitmaps, merge_two_obkvs, serialize_roaring_bitmap,
|
||||
MergeFn,
|
||||
merge_obkvs_and_operations, merge_roaring_bitmaps, merge_two_obkvs,
|
||||
roaring_bitmap_from_u32s_array, serialize_roaring_bitmap, MergeFn,
|
||||
};
|
||||
|
||||
use crate::MAX_WORD_LENGTH;
|
||||
|
@ -236,7 +236,7 @@ where
|
||||
primary_key,
|
||||
fields_ids_map,
|
||||
field_distribution,
|
||||
new_external_documents_ids,
|
||||
mut external_documents_ids,
|
||||
new_documents_ids,
|
||||
replaced_documents_ids,
|
||||
documents_count,
|
||||
@ -363,6 +363,9 @@ where
|
||||
deletion_builder.delete_documents(&replaced_documents_ids);
|
||||
let deleted_documents_result = deletion_builder.execute_inner()?;
|
||||
debug!("{} documents actually deleted", deleted_documents_result.deleted_documents);
|
||||
if !deleted_documents_result.soft_deletion_used {
|
||||
external_documents_ids.delete_soft_deleted_documents_ids_from_fsts()?;
|
||||
}
|
||||
}
|
||||
|
||||
let index_documents_ids = self.index.documents_ids(self.wtxn)?;
|
||||
@ -442,9 +445,6 @@ where
|
||||
self.index.put_primary_key(self.wtxn, &primary_key)?;
|
||||
|
||||
// We write the external documents ids into the main database.
|
||||
let mut external_documents_ids = self.index.external_documents_ids(self.wtxn)?;
|
||||
external_documents_ids.insert_ids(&new_external_documents_ids)?;
|
||||
let external_documents_ids = external_documents_ids.into_static();
|
||||
self.index.put_external_documents_ids(self.wtxn, &external_documents_ids)?;
|
||||
|
||||
let all_documents_ids = index_documents_ids | new_documents_ids;
|
||||
@ -2471,11 +2471,11 @@ mod tests {
|
||||
{
|
||||
"id": 3,
|
||||
"text": "a a a a a a a a a a a a a a a a a
|
||||
a a a a a a a a a a a a a a a a a a a a a a a a a a
|
||||
a a a a a a a a a a a a a a a a a a a a a a a a a a
|
||||
a a a a a a a a a a a a a a a a a a a a a a a a a a
|
||||
a a a a a a a a a a a a a a a a a a a a a a a a a a
|
||||
a a a a a a a a a a a a a a a a a a a a a a a a a a
|
||||
a a a a a a a a a a a a a a a a a a a a a a a a a a
|
||||
a a a a a a a a a a a a a a a a a a a a a a a a a a
|
||||
a a a a a a a a a a a a a a a a a a a a a a a a a a
|
||||
a a a a a a a a a a a a a a a a a a a a a a a a a a
|
||||
a a a a a a a a a a a a a a a a a a a a a a a a a a
|
||||
a a a a a a a a a a a a a a a a a a a a a "
|
||||
}
|
||||
]))
|
||||
@ -2513,171 +2513,6 @@ mod tests {
|
||||
|
||||
db_snap!(index, word_fid_docids, 3, @"4c2e2a1832e5802796edc1638136d933");
|
||||
db_snap!(index, word_position_docids, 3, @"74f556b91d161d997a89468b4da1cb8f");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn reproduce_the_bug() {
|
||||
/*
|
||||
[milli/examples/fuzz.rs:69] &batches = [
|
||||
Batch(
|
||||
[
|
||||
AddDoc(
|
||||
{ "id": 1, "doggo": "bernese" }, => internal 0
|
||||
),
|
||||
],
|
||||
),
|
||||
Batch(
|
||||
[
|
||||
DeleteDoc(
|
||||
1, => delete internal 0
|
||||
),
|
||||
AddDoc(
|
||||
{ "id": 0, "catto": "jorts" }, => internal 1
|
||||
),
|
||||
],
|
||||
),
|
||||
Batch(
|
||||
[
|
||||
AddDoc(
|
||||
{ "id": 1, "catto": "jorts" }, => internal 2
|
||||
),
|
||||
],
|
||||
),
|
||||
]
|
||||
*/
|
||||
let mut index = TempIndex::new();
|
||||
index.index_documents_config.deletion_strategy = DeletionStrategy::AlwaysHard;
|
||||
|
||||
// STARTÂ OFÂ BATCH
|
||||
|
||||
println!("--- ENTERING BATCH 1");
|
||||
|
||||
let mut wtxn = index.write_txn().unwrap();
|
||||
|
||||
let builder = IndexDocuments::new(
|
||||
&mut wtxn,
|
||||
&index,
|
||||
&index.indexer_config,
|
||||
index.index_documents_config.clone(),
|
||||
|_| (),
|
||||
|| false,
|
||||
)
|
||||
.unwrap();
|
||||
|
||||
// OP
|
||||
|
||||
let documents = documents!([
|
||||
{ "id": 1, "doggo": "bernese" },
|
||||
]);
|
||||
let (builder, added) = builder.add_documents(documents).unwrap();
|
||||
insta::assert_display_snapshot!(added.unwrap(), @"1");
|
||||
|
||||
// FINISHING
|
||||
let addition = builder.execute().unwrap();
|
||||
insta::assert_debug_snapshot!(addition, @r###"
|
||||
DocumentAdditionResult {
|
||||
indexed_documents: 1,
|
||||
number_of_documents: 1,
|
||||
}
|
||||
"###);
|
||||
wtxn.commit().unwrap();
|
||||
|
||||
db_snap!(index, documents, @r###"
|
||||
{"id":1,"doggo":"bernese"}
|
||||
"###);
|
||||
db_snap!(index, external_documents_ids, @r###"
|
||||
soft:
|
||||
hard:
|
||||
1 0
|
||||
"###);
|
||||
|
||||
// A first batch of documents has been inserted
|
||||
|
||||
// BATCH 2
|
||||
|
||||
println!("--- ENTERING BATCH 2");
|
||||
|
||||
let mut wtxn = index.write_txn().unwrap();
|
||||
|
||||
let builder = IndexDocuments::new(
|
||||
&mut wtxn,
|
||||
&index,
|
||||
&index.indexer_config,
|
||||
index.index_documents_config.clone(),
|
||||
|_| (),
|
||||
|| false,
|
||||
)
|
||||
.unwrap();
|
||||
|
||||
let (builder, removed) = builder.remove_documents(vec![S("1")]).unwrap();
|
||||
insta::assert_display_snapshot!(removed.unwrap(), @"1");
|
||||
|
||||
let documents = documents!([
|
||||
{ "id": 0, "catto": "jorts" },
|
||||
]);
|
||||
let (builder, added) = builder.add_documents(documents).unwrap();
|
||||
insta::assert_display_snapshot!(added.unwrap(), @"1");
|
||||
|
||||
let addition = builder.execute().unwrap();
|
||||
insta::assert_debug_snapshot!(addition, @r###"
|
||||
DocumentAdditionResult {
|
||||
indexed_documents: 1,
|
||||
number_of_documents: 1,
|
||||
}
|
||||
"###);
|
||||
wtxn.commit().unwrap();
|
||||
|
||||
db_snap!(index, documents, @r###"
|
||||
{"id":0,"catto":"jorts"}
|
||||
"###);
|
||||
|
||||
db_snap!(index, external_documents_ids, @r###"
|
||||
soft:
|
||||
hard:
|
||||
0 1
|
||||
"###);
|
||||
|
||||
db_snap!(index, soft_deleted_documents_ids, @"[]");
|
||||
|
||||
// BATCH 3
|
||||
|
||||
println!("--- ENTERING BATCH 3");
|
||||
|
||||
let mut wtxn = index.write_txn().unwrap();
|
||||
|
||||
let builder = IndexDocuments::new(
|
||||
&mut wtxn,
|
||||
&index,
|
||||
&index.indexer_config,
|
||||
index.index_documents_config.clone(),
|
||||
|_| (),
|
||||
|| false,
|
||||
)
|
||||
.unwrap();
|
||||
|
||||
let documents = documents!([
|
||||
{ "id": 1, "catto": "jorts" },
|
||||
]);
|
||||
let (builder, added) = builder.add_documents(documents).unwrap();
|
||||
insta::assert_display_snapshot!(added.unwrap(), @"1");
|
||||
|
||||
let addition = builder.execute().unwrap();
|
||||
insta::assert_debug_snapshot!(addition, @r###"
|
||||
DocumentAdditionResult {
|
||||
indexed_documents: 1,
|
||||
number_of_documents: 2,
|
||||
}
|
||||
"###);
|
||||
wtxn.commit().unwrap();
|
||||
|
||||
db_snap!(index, documents, @r###"
|
||||
{"id":1,"catto":"jorts"}
|
||||
{"id":0,"catto":"jorts"}
|
||||
"###);
|
||||
|
||||
// Ensuring all the returned IDs actually exists
|
||||
let rtxn = index.read_txn().unwrap();
|
||||
let res = index.search(&rtxn).execute().unwrap();
|
||||
index.documents(&rtxn, res.documents_ids).unwrap();
|
||||
db_snap!(index, docid_word_positions, 3, @"5287245332627675740b28bd46e1cde1");
|
||||
}
|
||||
}
|
||||
|
@ -21,14 +21,15 @@ use crate::error::{Error, InternalError, UserError};
|
||||
use crate::index::{db_name, main_key};
|
||||
use crate::update::{AvailableDocumentsIds, ClearDocuments, UpdateIndexingStep};
|
||||
use crate::{
|
||||
FieldDistribution, FieldId, FieldIdMapMissingEntry, FieldsIdsMap, Index, Result, BEU32,
|
||||
ExternalDocumentsIds, FieldDistribution, FieldId, FieldIdMapMissingEntry, FieldsIdsMap, Index,
|
||||
Result, BEU32,
|
||||
};
|
||||
|
||||
pub struct TransformOutput {
|
||||
pub primary_key: String,
|
||||
pub fields_ids_map: FieldsIdsMap,
|
||||
pub field_distribution: FieldDistribution,
|
||||
pub new_external_documents_ids: fst::Map<Cow<'static, [u8]>>,
|
||||
pub external_documents_ids: ExternalDocumentsIds<'static>,
|
||||
pub new_documents_ids: RoaringBitmap,
|
||||
pub replaced_documents_ids: RoaringBitmap,
|
||||
pub documents_count: usize,
|
||||
@ -567,6 +568,8 @@ impl<'a, 'i> Transform<'a, 'i> {
|
||||
}))?
|
||||
.to_string();
|
||||
|
||||
let mut external_documents_ids = self.index.external_documents_ids(wtxn)?;
|
||||
|
||||
// We create a final writer to write the new documents in order from the sorter.
|
||||
let mut writer = create_writer(
|
||||
self.indexer_settings.chunk_compression_type,
|
||||
@ -648,12 +651,13 @@ impl<'a, 'i> Transform<'a, 'i> {
|
||||
fst_new_external_documents_ids_builder.insert(key, value)
|
||||
})?;
|
||||
let new_external_documents_ids = fst_new_external_documents_ids_builder.into_map();
|
||||
external_documents_ids.insert_ids(&new_external_documents_ids)?;
|
||||
|
||||
Ok(TransformOutput {
|
||||
primary_key,
|
||||
fields_ids_map: self.fields_ids_map,
|
||||
field_distribution,
|
||||
new_external_documents_ids: new_external_documents_ids.map_data(Cow::Owned).unwrap(),
|
||||
external_documents_ids: external_documents_ids.into_static(),
|
||||
new_documents_ids: self.new_documents_ids,
|
||||
replaced_documents_ids: self.replaced_documents_ids,
|
||||
documents_count: self.documents_count,
|
||||
@ -687,8 +691,7 @@ impl<'a, 'i> Transform<'a, 'i> {
|
||||
let new_external_documents_ids = {
|
||||
let mut external_documents_ids = self.index.external_documents_ids(wtxn)?;
|
||||
external_documents_ids.delete_soft_deleted_documents_ids_from_fsts()?;
|
||||
// This call should be free and can't fail since the previous method merged both fsts.
|
||||
external_documents_ids.into_static().to_fst()?.into_owned()
|
||||
external_documents_ids
|
||||
};
|
||||
|
||||
let documents_ids = self.index.documents_ids(wtxn)?;
|
||||
@ -773,7 +776,7 @@ impl<'a, 'i> Transform<'a, 'i> {
|
||||
primary_key,
|
||||
fields_ids_map: new_fields_ids_map,
|
||||
field_distribution,
|
||||
new_external_documents_ids,
|
||||
external_documents_ids: new_external_documents_ids.into_static(),
|
||||
new_documents_ids: documents_ids,
|
||||
replaced_documents_ids: RoaringBitmap::default(),
|
||||
documents_count,
|
||||
|
@ -7,19 +7,24 @@ use std::io;
|
||||
use charabia::{Language, Script};
|
||||
use grenad::MergerBuilder;
|
||||
use heed::types::ByteSlice;
|
||||
use heed::RwTxn;
|
||||
use heed::{BytesDecode, RwTxn};
|
||||
use roaring::RoaringBitmap;
|
||||
|
||||
use super::helpers::{
|
||||
self, merge_ignore_values, serialize_roaring_bitmap, valid_lmdb_key, CursorClonableMmap,
|
||||
self, merge_ignore_values, roaring_bitmap_from_u32s_array, serialize_roaring_bitmap,
|
||||
valid_lmdb_key, CursorClonableMmap,
|
||||
};
|
||||
use super::{ClonableMmap, MergeFn};
|
||||
use crate::facet::FacetType;
|
||||
use crate::update::facet::FacetsUpdate;
|
||||
use crate::update::index_documents::helpers::as_cloneable_grenad;
|
||||
use crate::{lat_lng_to_xyz, CboRoaringBitmapCodec, DocumentId, GeoPoint, Index, Result};
|
||||
use crate::{
|
||||
lat_lng_to_xyz, BoRoaringBitmapCodec, CboRoaringBitmapCodec, DocumentId, GeoPoint, Index,
|
||||
Result,
|
||||
};
|
||||
|
||||
pub(crate) enum TypedChunk {
|
||||
DocidWordPositions(grenad::Reader<CursorClonableMmap>),
|
||||
FieldIdDocidFacetStrings(grenad::Reader<CursorClonableMmap>),
|
||||
FieldIdDocidFacetNumbers(grenad::Reader<CursorClonableMmap>),
|
||||
Documents(grenad::Reader<CursorClonableMmap>),
|
||||
@ -51,6 +56,29 @@ pub(crate) fn write_typed_chunk_into_index(
|
||||
) -> Result<(RoaringBitmap, bool)> {
|
||||
let mut is_merged_database = false;
|
||||
match typed_chunk {
|
||||
TypedChunk::DocidWordPositions(docid_word_positions_iter) => {
|
||||
write_entries_into_database(
|
||||
docid_word_positions_iter,
|
||||
&index.docid_word_positions,
|
||||
wtxn,
|
||||
index_is_empty,
|
||||
|value, buffer| {
|
||||
// ensure that values are unique and ordered
|
||||
let positions = roaring_bitmap_from_u32s_array(value);
|
||||
BoRoaringBitmapCodec::serialize_into(&positions, buffer);
|
||||
Ok(buffer)
|
||||
},
|
||||
|new_values, db_values, buffer| {
|
||||
let new_values = roaring_bitmap_from_u32s_array(new_values);
|
||||
let positions = match BoRoaringBitmapCodec::bytes_decode(db_values) {
|
||||
Some(db_values) => new_values | db_values,
|
||||
None => new_values, // should not happen
|
||||
};
|
||||
BoRoaringBitmapCodec::serialize_into(&positions, buffer);
|
||||
Ok(())
|
||||
},
|
||||
)?;
|
||||
}
|
||||
TypedChunk::Documents(obkv_documents_iter) => {
|
||||
let mut cursor = obkv_documents_iter.into_cursor()?;
|
||||
while let Some((key, value)) = cursor.move_on_next()? {
|
||||
|
@ -14,7 +14,7 @@ use crate::error::UserError;
|
||||
use crate::index::{DEFAULT_MIN_WORD_LEN_ONE_TYPO, DEFAULT_MIN_WORD_LEN_TWO_TYPOS};
|
||||
use crate::update::index_documents::IndexDocumentsMethod;
|
||||
use crate::update::{IndexDocuments, UpdateIndexingStep};
|
||||
use crate::{FieldsIdsMap, Index, Result};
|
||||
use crate::{FieldsIdsMap, Index, OrderBy, Result};
|
||||
|
||||
#[derive(Debug, Clone, PartialEq, Eq, Copy)]
|
||||
pub enum Setting<T> {
|
||||
@ -122,6 +122,7 @@ pub struct Settings<'a, 't, 'u, 'i> {
|
||||
/// Attributes on which typo tolerance is disabled.
|
||||
exact_attributes: Setting<HashSet<String>>,
|
||||
max_values_per_facet: Setting<usize>,
|
||||
sort_facet_values_by: Setting<HashMap<String, OrderBy>>,
|
||||
pagination_max_total_hits: Setting<usize>,
|
||||
}
|
||||
|
||||
@ -149,6 +150,7 @@ impl<'a, 't, 'u, 'i> Settings<'a, 't, 'u, 'i> {
|
||||
min_word_len_one_typo: Setting::NotSet,
|
||||
exact_attributes: Setting::NotSet,
|
||||
max_values_per_facet: Setting::NotSet,
|
||||
sort_facet_values_by: Setting::NotSet,
|
||||
pagination_max_total_hits: Setting::NotSet,
|
||||
indexer_config,
|
||||
}
|
||||
@ -275,6 +277,14 @@ impl<'a, 't, 'u, 'i> Settings<'a, 't, 'u, 'i> {
|
||||
self.max_values_per_facet = Setting::Reset;
|
||||
}
|
||||
|
||||
pub fn set_sort_facet_values_by(&mut self, value: HashMap<String, OrderBy>) {
|
||||
self.sort_facet_values_by = Setting::Set(value);
|
||||
}
|
||||
|
||||
pub fn reset_sort_facet_values_by(&mut self) {
|
||||
self.sort_facet_values_by = Setting::Reset;
|
||||
}
|
||||
|
||||
pub fn set_pagination_max_total_hits(&mut self, value: usize) {
|
||||
self.pagination_max_total_hits = Setting::Set(value);
|
||||
}
|
||||
@ -680,6 +690,20 @@ impl<'a, 't, 'u, 'i> Settings<'a, 't, 'u, 'i> {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn update_sort_facet_values_by(&mut self) -> Result<()> {
|
||||
match self.sort_facet_values_by.as_ref() {
|
||||
Setting::Set(value) => {
|
||||
self.index.put_sort_facet_values_by(self.wtxn, value)?;
|
||||
}
|
||||
Setting::Reset => {
|
||||
self.index.delete_sort_facet_values_by(self.wtxn)?;
|
||||
}
|
||||
Setting::NotSet => (),
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn update_pagination_max_total_hits(&mut self) -> Result<()> {
|
||||
match self.pagination_max_total_hits {
|
||||
Setting::Set(max) => {
|
||||
@ -714,6 +738,7 @@ impl<'a, 't, 'u, 'i> Settings<'a, 't, 'u, 'i> {
|
||||
self.update_min_typo_word_len()?;
|
||||
self.update_exact_words()?;
|
||||
self.update_max_values_per_facet()?;
|
||||
self.update_sort_facet_values_by()?;
|
||||
self.update_pagination_max_total_hits()?;
|
||||
|
||||
// If there is new faceted fields we indicate that we must reindex as we must
|
||||
@ -1515,6 +1540,7 @@ mod tests {
|
||||
exact_words,
|
||||
exact_attributes,
|
||||
max_values_per_facet,
|
||||
sort_facet_values_by,
|
||||
pagination_max_total_hits,
|
||||
} = settings;
|
||||
assert!(matches!(searchable_fields, Setting::NotSet));
|
||||
@ -1532,6 +1558,7 @@ mod tests {
|
||||
assert!(matches!(exact_words, Setting::NotSet));
|
||||
assert!(matches!(exact_attributes, Setting::NotSet));
|
||||
assert!(matches!(max_values_per_facet, Setting::NotSet));
|
||||
assert!(matches!(sort_facet_values_by, Setting::NotSet));
|
||||
assert!(matches!(pagination_max_total_hits, Setting::NotSet));
|
||||
})
|
||||
.unwrap();
|
||||
|
@ -5,7 +5,7 @@ use heed::EnvOpenOptions;
|
||||
use maplit::hashset;
|
||||
use milli::documents::{DocumentsBatchBuilder, DocumentsBatchReader};
|
||||
use milli::update::{IndexDocuments, IndexDocumentsConfig, IndexerConfig, Settings};
|
||||
use milli::{FacetDistribution, Index, Object};
|
||||
use milli::{FacetDistribution, Index, Object, OrderBy};
|
||||
use serde_json::Deserializer;
|
||||
|
||||
#[test]
|
||||
@ -63,12 +63,12 @@ fn test_facet_distribution_with_no_facet_values() {
|
||||
|
||||
let txn = index.read_txn().unwrap();
|
||||
let mut distrib = FacetDistribution::new(&txn, &index);
|
||||
distrib.facets(vec!["genres"]);
|
||||
distrib.facets(vec![("genres", OrderBy::default())]);
|
||||
let result = distrib.execute().unwrap();
|
||||
assert_eq!(result["genres"].len(), 0);
|
||||
|
||||
let mut distrib = FacetDistribution::new(&txn, &index);
|
||||
distrib.facets(vec!["tags"]);
|
||||
distrib.facets(vec![("tags", OrderBy::default())]);
|
||||
let result = distrib.execute().unwrap();
|
||||
assert_eq!(result["tags"].len(), 2);
|
||||
}
|
||||
|