Mirror of https://github.com/meilisearch/meilisearch.git (synced 2025-12-11 23:25:41 +00:00)

Compare commits: reduce-mx-...reduce-max

78 commits
| SHA1 |
|---|
| d6868dbd47 |
| 8628a0c856 |
| c1e3cc04b0 |
| d96d8bb0dd |
| 4a3405afec |
| 3cfd653db1 |
| f3e2f79290 |
| f517274d1f |
| 3f41bc642a |
| 672abdb341 |
| a13ed4d0b0 |
| 4cc2988482 |
| 26c7e31f25 |
| b2dee07b5e |
| d963b5f85a |
| 2acc3ec5ee |
| da04edff8c |
| 85a80f4f4c |
| 1213ec7164 |
| 0a7817a002 |
| 1dfc4038ab |
| 73198179f1 |
| 51dce9e9d1 |
| c9b65677bf |
| 35d5556f1f |
| c433bdd1cd |
| 2db09725f8 |
| fdb23132d4 |
| 11b95284cd |
| 1b601f70c6 |
| 8185731bbf |
| 840727d76f |
| ead07d0b9d |
| 44f231d41e |
| 3c5d1c93de |
| 087866d59f |
| 9111f5176f |
| b9dd092a62 |
| ca99bc3188 |
| 57d53de402 |
| 2e49d6aec1 |
| 51043f78f0 |
| a490a11325 |
| 101f5a20d2 |
| 6ce1ce77e6 |
| ec8f685d84 |
| 5758268866 |
| 4d037e6693 |
| 96da5130a4 |
| 3e19702de6 |
| 1e762d151f |
| 0b38f211ac |
| f6524a6858 |
| 65ad8cce36 |
| 42650f82e8 |
| a37da36766 |
| 85d96d35a8 |
| bf66e97b48 |
| a7ea5ec748 |
| dc7ba77e57 |
| 13f870e993 |
| 1a79fd0c3c |
| f759ec7fad |
| 4d691d071a |
| 23d1c86825 |
| c4a40e7110 |
| e01980c6f4 |
| 25209a3590 |
| 3064ea6495 |
| 46ec8a97e9 |
| c42a65a297 |
| d08f8690d2 |
| ad5f25d880 |
| 4d352a21ac |
| 918ce1dd67 |
| 4a4210c116 |
| 3533d4f2bb |
| 8095f21999 |
@@ -2,4 +2,3 @@ target
 Dockerfile
 .dockerignore
 .gitignore
-**/.git
.github/workflows/publish-apt-brew-pkg.yml (vendored, 2 lines changed)

@@ -35,7 +35,7 @@ jobs:
       - name: Build deb package
         run: cargo deb -p meilisearch -o target/debian/meilisearch.deb
       - name: Upload debian pkg to release
-        uses: svenstaro/upload-release-action@2.5.0
+        uses: svenstaro/upload-release-action@2.6.1
         with:
           repo_token: ${{ secrets.MEILI_BOT_GH_PAT }}
           file: target/debian/meilisearch.deb
.github/workflows/publish-binaries.yml (vendored, 8 lines changed)

@@ -54,7 +54,7 @@ jobs:
       # No need to upload binaries for dry run (cron)
       - name: Upload binaries to release
         if: github.event_name == 'release'
-        uses: svenstaro/upload-release-action@2.5.0
+        uses: svenstaro/upload-release-action@2.6.1
         with:
           repo_token: ${{ secrets.MEILI_BOT_GH_PAT }}
           file: target/release/meilisearch
@@ -87,7 +87,7 @@ jobs:
       # No need to upload binaries for dry run (cron)
       - name: Upload binaries to release
         if: github.event_name == 'release'
-        uses: svenstaro/upload-release-action@2.5.0
+        uses: svenstaro/upload-release-action@2.6.1
         with:
           repo_token: ${{ secrets.MEILI_BOT_GH_PAT }}
           file: target/release/${{ matrix.artifact_name }}
@@ -121,7 +121,7 @@ jobs:
       - name: Upload the binary to release
         # No need to upload binaries for dry run (cron)
         if: github.event_name == 'release'
-        uses: svenstaro/upload-release-action@2.5.0
+        uses: svenstaro/upload-release-action@2.6.1
         with:
           repo_token: ${{ secrets.MEILI_BOT_GH_PAT }}
           file: target/${{ matrix.target }}/release/meilisearch
@@ -183,7 +183,7 @@ jobs:
       - name: Upload the binary to release
         # No need to upload binaries for dry run (cron)
         if: github.event_name == 'release'
-        uses: svenstaro/upload-release-action@2.5.0
+        uses: svenstaro/upload-release-action@2.6.1
         with:
           repo_token: ${{ secrets.MEILI_BOT_GH_PAT }}
           file: target/${{ matrix.target }}/release/meilisearch
.github/workflows/publish-docker-images.yml (vendored, 7 lines changed)

@@ -58,13 +58,9 @@ jobs:
 
       - name: Set up QEMU
         uses: docker/setup-qemu-action@v2
-        with:
-          platforms: linux/amd64,linux/arm64
 
       - name: Set up Docker Buildx
         uses: docker/setup-buildx-action@v2
-        with:
-          platforms: linux/amd64,linux/arm64
 
       - name: Login to Docker Hub
         uses: docker/login-action@v2
@@ -92,13 +88,10 @@ jobs:
           push: true
           platforms: linux/amd64,linux/arm64
           tags: ${{ steps.meta.outputs.tags }}
-          builder: ${{ steps.buildx.outputs.name }}
           build-args: |
             COMMIT_SHA=${{ github.sha }}
             COMMIT_DATE=${{ steps.build-metadata.outputs.date }}
             GIT_TAG=${{ github.ref_name }}
-          cache-from: type=gha
-          cache-to: type=gha,mode=max
 
       # /!\ Don't touch this without checking with Cloud team
       - name: Send CI information to Cloud team
.github/workflows/sdks-tests.yml (vendored, 21 lines changed)

@@ -3,6 +3,11 @@ name: SDKs tests
 
 on:
   workflow_dispatch:
+    inputs:
+      docker_image:
+        description: 'The Meilisearch Docker image used'
+        required: false
+        default: nightly
   schedule:
     - cron: "0 6 * * MON" # Every Monday at 6:00AM
 
@@ -17,7 +22,7 @@ jobs:
     runs-on: ubuntu-latest
     services:
       meilisearch:
-        image: getmeili/meilisearch:nightly
+        image: getmeili/meilisearch:${{ github.event.inputs.docker_image }}
         env:
           MEILI_MASTER_KEY: ${{ env.MEILI_MASTER_KEY }}
           MEILI_NO_ANALYTICS: ${{ env.MEILI_NO_ANALYTICS }}
@@ -51,7 +56,7 @@ jobs:
     runs-on: ubuntu-latest
     services:
       meilisearch:
-        image: getmeili/meilisearch:nightly
+        image: getmeili/meilisearch:${{ github.event.inputs.docker_image }}
         env:
           MEILI_MASTER_KEY: ${{ env.MEILI_MASTER_KEY }}
           MEILI_NO_ANALYTICS: ${{ env.MEILI_NO_ANALYTICS }}
@@ -77,7 +82,7 @@ jobs:
     runs-on: ubuntu-latest
     services:
       meilisearch:
-        image: getmeili/meilisearch:nightly
+        image: getmeili/meilisearch:${{ github.event.inputs.docker_image }}
         env:
           MEILI_MASTER_KEY: ${{ env.MEILI_MASTER_KEY }}
           MEILI_NO_ANALYTICS: ${{ env.MEILI_NO_ANALYTICS }}
@@ -107,7 +112,7 @@ jobs:
     runs-on: ubuntu-latest
     services:
       meilisearch:
-        image: getmeili/meilisearch:nightly
+        image: getmeili/meilisearch:${{ github.event.inputs.docker_image }}
         env:
           MEILI_MASTER_KEY: ${{ env.MEILI_MASTER_KEY }}
           MEILI_NO_ANALYTICS: ${{ env.MEILI_NO_ANALYTICS }}
@@ -131,7 +136,7 @@ jobs:
     runs-on: ubuntu-latest
     services:
       meilisearch:
-        image: getmeili/meilisearch:nightly
+        image: getmeili/meilisearch:${{ github.event.inputs.docker_image }}
         env:
           MEILI_MASTER_KEY: ${{ env.MEILI_MASTER_KEY }}
           MEILI_NO_ANALYTICS: ${{ env.MEILI_NO_ANALYTICS }}
@@ -139,7 +144,7 @@ jobs:
           - '7700:7700'
     steps:
       - name: Set up Go
-        uses: actions/setup-go@v3
+        uses: actions/setup-go@v4
         with:
          go-version: stable
       - uses: actions/checkout@v3
@@ -160,7 +165,7 @@ jobs:
    runs-on: ubuntu-latest
    services:
      meilisearch:
-        image: getmeili/meilisearch:nightly
+        image: getmeili/meilisearch:${{ github.event.inputs.docker_image }}
        env:
          MEILI_MASTER_KEY: ${{ env.MEILI_MASTER_KEY }}
          MEILI_NO_ANALYTICS: ${{ env.MEILI_NO_ANALYTICS }}
@@ -184,7 +189,7 @@ jobs:
    runs-on: ubuntu-latest
    services:
      meilisearch:
-        image: getmeili/meilisearch:nightly
+        image: getmeili/meilisearch:${{ github.event.inputs.docker_image }}
        env:
          MEILI_MASTER_KEY: ${{ env.MEILI_MASTER_KEY }}
          MEILI_NO_ANALYTICS: ${{ env.MEILI_NO_ANALYTICS }}
.github/workflows/test-suite.yml (vendored, 33 lines changed)

@@ -43,7 +43,7 @@ jobs:
           toolchain: nightly
           override: true
       - name: Cache dependencies
-        uses: Swatinem/rust-cache@v2.2.1
+        uses: Swatinem/rust-cache@v2.4.0
       - name: Run cargo check without any default features
         uses: actions-rs/cargo@v1
         with:
@@ -65,7 +65,7 @@ jobs:
     steps:
       - uses: actions/checkout@v3
       - name: Cache dependencies
-        uses: Swatinem/rust-cache@v2.2.1
+        uses: Swatinem/rust-cache@v2.4.0
       - name: Run cargo check without any default features
         uses: actions-rs/cargo@v1
         with:
@@ -105,6 +105,29 @@ jobs:
           command: test
           args: --workspace --locked --release --all-features
 
+  test-disabled-tokenization:
+    name: Test disabled tokenization
+    runs-on: ubuntu-latest
+    container:
+      image: ubuntu:18.04
+    if: github.event_name == 'schedule'
+    steps:
+      - uses: actions/checkout@v3
+      - name: Install needed dependencies
+        run: |
+          apt-get update
+          apt-get install --assume-yes build-essential curl
+      - uses: actions-rs/toolchain@v1
+        with:
+          toolchain: stable
+          override: true
+      - name: Run cargo tree without default features and check lindera is not present
+        run: |
+          cargo tree -f '{p} {f}' -e normal --no-default-features | grep lindera -vqz
+      - name: Run cargo tree with default features and check lindera is pressent
+        run: |
+          cargo tree -f '{p} {f}' -e normal | grep lindera -qz
+
   # We run tests in debug also, to make sure that the debug_assertions are hit
   test-debug:
     name: Run tests in debug
@@ -123,7 +146,7 @@ jobs:
           toolchain: stable
           override: true
       - name: Cache dependencies
-        uses: Swatinem/rust-cache@v2.2.1
+        uses: Swatinem/rust-cache@v2.4.0
       - name: Run tests in debug
         uses: actions-rs/cargo@v1
         with:
@@ -142,7 +165,7 @@ jobs:
           override: true
           components: clippy
       - name: Cache dependencies
-        uses: Swatinem/rust-cache@v2.2.1
+        uses: Swatinem/rust-cache@v2.4.0
       - name: Run cargo clippy
         uses: actions-rs/cargo@v1
         with:
@@ -161,7 +184,7 @@ jobs:
           override: true
           components: rustfmt
       - name: Cache dependencies
-        uses: Swatinem/rust-cache@v2.2.1
+        uses: Swatinem/rust-cache@v2.4.0
       - name: Run cargo fmt
         # Since we never ran the `build.rs` script in the benchmark directory we are missing one auto-generated import file.
         # Since we want to trigger (and fail) this action as fast as possible, instead of building the benchmark crate
Cargo.lock (generated, 32 lines changed)

@@ -463,7 +463,7 @@ checksum = "b645a089122eccb6111b4f81cbc1a49f5900ac4666bb93ac027feaecf15607bf"
 
 [[package]]
 name = "benchmarks"
-version = "1.1.1"
+version = "1.2.0"
 dependencies = [
  "anyhow",
  "bytes",
@@ -1209,7 +1209,7 @@ dependencies = [
 
 [[package]]
 name = "dump"
-version = "1.1.1"
+version = "1.2.0"
 dependencies = [
  "anyhow",
  "big_s",
@@ -1428,7 +1428,7 @@ dependencies = [
 
 [[package]]
 name = "file-store"
-version = "1.1.1"
+version = "1.2.0"
 dependencies = [
  "faux",
  "tempfile",
@@ -1450,7 +1450,7 @@ dependencies = [
 
 [[package]]
 name = "filter-parser"
-version = "1.1.1"
+version = "1.2.0"
 dependencies = [
  "insta",
  "nom",
@@ -1476,7 +1476,7 @@ dependencies = [
 
 [[package]]
 name = "flatten-serde-json"
-version = "1.1.1"
+version = "1.2.0"
 dependencies = [
  "criterion",
  "serde_json",
@@ -1794,7 +1794,7 @@ checksum = "95505c38b4572b2d910cecb0281560f54b440a19336cbbcb27bf6ce6adc6f5a8"
 [[package]]
 name = "heed"
 version = "0.12.5"
-source = "git+https://github.com/meilisearch/heed?tag=v0.12.5#4158a6c484752afaaf9e2530a6ee0e7ab0f24ee8"
+source = "git+https://github.com/meilisearch/heed?tag=v0.12.6#8c5b94225fc949c02bb7b900cc50ffaf6b584b1e"
 dependencies = [
  "byteorder",
  "heed-traits",
@@ -1811,12 +1811,12 @@ dependencies = [
 [[package]]
 name = "heed-traits"
 version = "0.7.0"
-source = "git+https://github.com/meilisearch/heed?tag=v0.12.5#4158a6c484752afaaf9e2530a6ee0e7ab0f24ee8"
+source = "git+https://github.com/meilisearch/heed?tag=v0.12.6#8c5b94225fc949c02bb7b900cc50ffaf6b584b1e"
 
 [[package]]
 name = "heed-types"
 version = "0.7.2"
-source = "git+https://github.com/meilisearch/heed?tag=v0.12.5#4158a6c484752afaaf9e2530a6ee0e7ab0f24ee8"
+source = "git+https://github.com/meilisearch/heed?tag=v0.12.6#8c5b94225fc949c02bb7b900cc50ffaf6b584b1e"
 dependencies = [
  "bincode",
  "heed-traits",
@@ -1959,7 +1959,7 @@ dependencies = [
 
 [[package]]
 name = "index-scheduler"
-version = "1.1.1"
+version = "1.2.0"
 dependencies = [
  "anyhow",
  "big_s",
@@ -2113,7 +2113,7 @@ dependencies = [
 
 [[package]]
 name = "json-depth-checker"
-version = "1.1.1"
+version = "1.2.0"
 dependencies = [
  "criterion",
  "serde_json",
@@ -2539,7 +2539,7 @@ checksum = "490cc448043f947bae3cbee9c203358d62dbee0db12107a74be5c30ccfd09771"
 
 [[package]]
 name = "meili-snap"
-version = "1.1.1"
+version = "1.2.0"
 dependencies = [
  "insta",
  "md5",
@@ -2548,7 +2548,7 @@ dependencies = [
 
 [[package]]
 name = "meilisearch"
-version = "1.1.1"
+version = "1.2.0"
 dependencies = [
  "actix-cors",
  "actix-http",
@@ -2636,7 +2636,7 @@ dependencies = [
 
 [[package]]
 name = "meilisearch-auth"
-version = "1.1.1"
+version = "1.2.0"
 dependencies = [
  "base64 0.21.0",
  "enum-iterator",
@@ -2655,7 +2655,7 @@ dependencies = [
 
 [[package]]
 name = "meilisearch-types"
-version = "1.1.1"
+version = "1.2.0"
 dependencies = [
  "actix-web",
  "anyhow",
@@ -2709,7 +2709,7 @@ dependencies = [
 
 [[package]]
 name = "milli"
-version = "1.1.1"
+version = "1.2.0"
 dependencies = [
  "big_s",
  "bimap",
@@ -3064,7 +3064,7 @@ checksum = "478c572c3d73181ff3c2539045f6eb99e5491218eae919370993b890cdbdd98e"
 
 [[package]]
 name = "permissive-json-pointer"
-version = "1.1.1"
+version = "1.2.0"
 dependencies = [
  "big_s",
  "serde_json",
@@ -17,7 +17,7 @@ members = [
 ]
 
 [workspace.package]
-version = "1.1.1"
+version = "1.2.0"
 authors = ["Quentin de Quelen <quentin@dequelen.me>", "Clément Renault <clement@meilisearch.com>"]
 description = "Meilisearch HTTP server"
 homepage = "https://meilisearch.com"
@@ -1,4 +1,3 @@
-# syntax=docker/dockerfile:1.4
 # Compile
 FROM rust:alpine3.16 AS compiler
 
@@ -12,7 +11,7 @@ ARG GIT_TAG
 ENV VERGEN_GIT_SHA=${COMMIT_SHA} VERGEN_GIT_COMMIT_TIMESTAMP=${COMMIT_DATE} VERGEN_GIT_SEMVER_LIGHTWEIGHT=${GIT_TAG}
 ENV RUSTFLAGS="-C target-feature=-crt-static"
 
-COPY --link . .
+COPY . .
 RUN set -eux; \
     apkArch="$(apk --print-arch)"; \
     if [ "$apkArch" = "aarch64" ]; then \
@@ -31,7 +30,7 @@ RUN apk update --quiet \
 
 # add meilisearch to the `/bin` so you can run it from anywhere and it's easy
 # to find.
-COPY --from=compiler --link /meilisearch/target/release/meilisearch /bin/meilisearch
+COPY --from=compiler /meilisearch/target/release/meilisearch /bin/meilisearch
 # To stay compatible with the older version of the container (pre v0.27.0) we're
 # going to symlink the meilisearch binary in the path to `/meilisearch`
 RUN ln -s /bin/meilisearch /meilisearch
assets/grafana-dashboard.json (normal file, 1376 lines): file diff suppressed because it is too large.
assets/prometheus-basic-scraper.yml (normal file, 19 lines added)

@@ -0,0 +1,19 @@
+global:
+  scrape_interval: 15s # By default, scrape targets every 15 seconds.
+
+  # Attach these labels to any time series or alerts when communicating with
+  # external systems (federation, remote storage, Alertmanager).
+  external_labels:
+    monitor: 'codelab-monitor'
+
+# A scrape configuration containing exactly one endpoint to scrape:
+# Here it's Prometheus itself.
+scrape_configs:
+  # The job name is added as a label `job=<job_name>` to any timeseries scraped from this config.
+  - job_name: 'meilisearch'
+
+    # Override the global default and scrape targets from this job every 5 seconds.
+    scrape_interval: 5s
+
+    static_configs:
+      - targets: ['localhost:7700']
config.toml (55 lines changed)

@@ -1,128 +1,131 @@
 # This file shows the default configuration of Meilisearch.
 # All variables are defined here: https://www.meilisearch.com/docs/learn/configuration/instance_options#environment-variables
 
-db_path = "./data.ms"
 # Designates the location where database files will be created and retrieved.
 # https://www.meilisearch.com/docs/learn/configuration/instance_options#database-path
+db_path = "./data.ms"
 
-env = "development"
 # Configures the instance's environment. Value must be either `production` or `development`.
 # https://www.meilisearch.com/docs/learn/configuration/instance_options#environment
+env = "development"
 
-http_addr = "localhost:7700"
 # The address on which the HTTP server will listen.
+http_addr = "localhost:7700"
 
-# master_key = "YOUR_MASTER_KEY_VALUE"
 # Sets the instance's master key, automatically protecting all routes except GET /health.
 # https://www.meilisearch.com/docs/learn/configuration/instance_options#master-key
+# master_key = "YOUR_MASTER_KEY_VALUE"
 
-# no_analytics = true
 # Deactivates Meilisearch's built-in telemetry when provided.
 # Meilisearch automatically collects data from all instances that do not opt out using this flag.
 # All gathered data is used solely for the purpose of improving Meilisearch, and can be deleted at any time.
 # https://www.meilisearch.com/docs/learn/configuration/instance_options#disable-analytics
+# no_analytics = true
 
-http_payload_size_limit = "100 MB"
 # Sets the maximum size of accepted payloads.
 # https://www.meilisearch.com/docs/learn/configuration/instance_options#payload-limit-size
+http_payload_size_limit = "100 MB"
 
-log_level = "INFO"
 # Defines how much detail should be present in Meilisearch's logs.
 # Meilisearch currently supports six log levels, listed in order of increasing verbosity: `OFF`, `ERROR`, `WARN`, `INFO`, `DEBUG`, `TRACE`
 # https://www.meilisearch.com/docs/learn/configuration/instance_options#log-level
+log_level = "INFO"
 
-# max_indexing_memory = "2 GiB"
 # Sets the maximum amount of RAM Meilisearch can use when indexing.
 # https://www.meilisearch.com/docs/learn/configuration/instance_options#max-indexing-memory
+# max_indexing_memory = "2 GiB"
 
-# max_indexing_threads = 4
 # Sets the maximum number of threads Meilisearch can use during indexing.
 # https://www.meilisearch.com/docs/learn/configuration/instance_options#max-indexing-threads
+# max_indexing_threads = 4
 
 #############
 ### DUMPS ###
 #############
 
-dump_dir = "dumps/"
 # Sets the directory where Meilisearch will create dump files.
 # https://www.meilisearch.com/docs/learn/configuration/instance_options#dump-directory
+dump_dir = "dumps/"
 
-# import_dump = "./path/to/my/file.dump"
 # Imports the dump file located at the specified path. Path must point to a .dump file.
 # https://www.meilisearch.com/docs/learn/configuration/instance_options#import-dump
+# import_dump = "./path/to/my/file.dump"
 
-ignore_missing_dump = false
 # Prevents Meilisearch from throwing an error when `import_dump` does not point to a valid dump file.
 # https://www.meilisearch.com/docs/learn/configuration/instance_options#ignore-missing-dump
+ignore_missing_dump = false
 
-ignore_dump_if_db_exists = false
 # Prevents a Meilisearch instance with an existing database from throwing an error when using `import_dump`.
 # https://www.meilisearch.com/docs/learn/configuration/instance_options#ignore-dump-if-db-exists
+ignore_dump_if_db_exists = false
 
 
 #################
 ### SNAPSHOTS ###
 #################
 
-schedule_snapshot = false
 # Enables scheduled snapshots when true, disable when false (the default).
 # If the value is given as an integer, then enables the scheduled snapshot with the passed value as the interval
 # between each snapshot, in seconds.
 # https://www.meilisearch.com/docs/learn/configuration/instance_options#schedule-snapshot-creation
+schedule_snapshot = false
 
-snapshot_dir = "snapshots/"
 # Sets the directory where Meilisearch will store snapshots.
 # https://www.meilisearch.com/docs/learn/configuration/instance_options#snapshot-destination
+snapshot_dir = "snapshots/"
 
-# import_snapshot = "./path/to/my/snapshot"
 # Launches Meilisearch after importing a previously-generated snapshot at the given filepath.
 # https://www.meilisearch.com/docs/learn/configuration/instance_options#import-snapshot
+# import_snapshot = "./path/to/my/snapshot"
 
-ignore_missing_snapshot = false
 # Prevents a Meilisearch instance from throwing an error when `import_snapshot` does not point to a valid snapshot file.
 # https://www.meilisearch.com/docs/learn/configuration/instance_options#ignore-missing-snapshot
+ignore_missing_snapshot = false
 
-ignore_snapshot_if_db_exists = false
 # Prevents a Meilisearch instance with an existing database from throwing an error when using `import_snapshot`.
 # https://www.meilisearch.com/docs/learn/configuration/instance_options#ignore-snapshot-if-db-exists
+ignore_snapshot_if_db_exists = false
 
 
 ###########
 ### SSL ###
 ###########
 
-# ssl_auth_path = "./path/to/root"
 # Enables client authentication in the specified path.
 # https://www.meilisearch.com/docs/learn/configuration/instance_options#ssl-authentication-path
+# ssl_auth_path = "./path/to/root"
 
-# ssl_cert_path = "./path/to/certfile"
 # Sets the server's SSL certificates.
 # https://www.meilisearch.com/docs/learn/configuration/instance_options#ssl-certificates-path
+# ssl_cert_path = "./path/to/certfile"
 
-# ssl_key_path = "./path/to/private-key"
 # Sets the server's SSL key files.
 # https://www.meilisearch.com/docs/learn/configuration/instance_options#ssl-key-path
+# ssl_key_path = "./path/to/private-key"
 
-# ssl_ocsp_path = "./path/to/ocsp-file"
 # Sets the server's OCSP file.
 # https://www.meilisearch.com/docs/learn/configuration/instance_options#ssl-ocsp-path
+# ssl_ocsp_path = "./path/to/ocsp-file"
 
-ssl_require_auth = false
 # Makes SSL authentication mandatory.
 # https://www.meilisearch.com/docs/learn/configuration/instance_options#ssl-require-auth
+ssl_require_auth = false
 
-ssl_resumption = false
 # Activates SSL session resumption.
 # https://www.meilisearch.com/docs/learn/configuration/instance_options#ssl-resumption
+ssl_resumption = false
 
-ssl_tickets = false
 # Activates SSL tickets.
 # https://www.meilisearch.com/docs/learn/configuration/instance_options#ssl-tickets
+ssl_tickets = false
 
 #############################
 ### Experimental features ###
 #############################
 
-experimental_enable_metrics = false
 # Experimental metrics feature. For more information, see: <https://github.com/meilisearch/meilisearch/discussions/3518>
 # Enables the Prometheus metrics on the `GET /metrics` endpoint.
+experimental_enable_metrics = false
+
+# Experimental RAM reduction during indexing, do not use in production, see: <https://github.com/meilisearch/product/discussions/652>
+experimental_reduce_indexing_memory_usage = false
File diff suppressed because it is too large.
@@ -24,6 +24,7 @@ use std::io::BufWriter;
 
 use dump::IndexMetadata;
 use log::{debug, error, info};
+use meilisearch_types::error::Code;
 use meilisearch_types::heed::{RoTxn, RwTxn};
 use meilisearch_types::milli::documents::{obkv_to_object, DocumentsBatchReader};
 use meilisearch_types::milli::heed::CompactionOption;
@@ -1491,7 +1492,12 @@ fn delete_document_by_filter(filter: &serde_json::Value, index: Index) -> Result
     Ok(if let Some(filter) = filter {
         let mut wtxn = index.write_txn()?;
 
-        let candidates = filter.evaluate(&wtxn, &index)?;
+        let candidates = filter.evaluate(&wtxn, &index).map_err(|err| match err {
+            milli::Error::UserError(milli::UserError::InvalidFilter(_)) => {
+                Error::from(err).with_custom_error_code(Code::InvalidDocumentFilter)
+            }
+            e => e.into(),
+        })?;
         let mut delete_operation = DeleteDocuments::new(&mut wtxn, &index)?;
         delete_operation.delete_documents(&candidates);
         let deleted_documents =
@@ -46,6 +46,8 @@ impl From<DateField> for Code {
 #[allow(clippy::large_enum_variant)]
 #[derive(Error, Debug)]
 pub enum Error {
+    #[error("{1}")]
+    WithCustomErrorCode(Code, Box<Self>),
     #[error("Index `{0}` not found.")]
     IndexNotFound(String),
     #[error("Index `{0}` already exists.")]
@@ -144,6 +146,7 @@ impl Error {
     pub fn is_recoverable(&self) -> bool {
         match self {
             Error::IndexNotFound(_)
+            | Error::WithCustomErrorCode(_, _)
             | Error::IndexAlreadyExists(_)
             | Error::SwapDuplicateIndexFound(_)
             | Error::SwapDuplicateIndexesFound(_)
@@ -176,11 +179,16 @@ impl Error {
             Error::PlannedFailure => false,
         }
     }
+
+    pub fn with_custom_error_code(self, code: Code) -> Self {
+        Self::WithCustomErrorCode(code, Box::new(self))
+    }
 }
 
 impl ErrorCode for Error {
     fn error_code(&self) -> Code {
         match self {
+            Error::WithCustomErrorCode(code, _) => *code,
             Error::IndexNotFound(_) => Code::IndexNotFound,
             Error::IndexAlreadyExists(_) => Code::IndexAlreadyExists,
             Error::SwapDuplicateIndexesFound(_) => Code::InvalidSwapDuplicateIndexFound,
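The two hunks above add a generic escape hatch for overriding an error's public code: any `Error` can be wrapped in `WithCustomErrorCode`, and `error_code()` then reports the wrapped code. A minimal sketch of the call pattern, reusing the `Error` and `Code` types from the hunks above (this hypothetical helper is not code from the diff):

```rust
// Hypothetical illustration of the pattern introduced above: reclassify an
// index-scheduler `Error` so clients see the `InvalidDocumentFilter` code
// instead of whatever the inner error would normally map to.
fn reclassify_filter_error(err: Error) -> Error {
    // `with_custom_error_code` boxes the original error and pins the code,
    // exactly as `delete_document_by_filter` does in the earlier hunk.
    err.with_custom_error_code(Code::InvalidDocumentFilter)
}
```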
@@ -5,6 +5,7 @@ use std::collections::BTreeMap;
 use std::path::Path;
 use std::time::Duration;
 
+use meilisearch_types::heed::flags::Flags;
 use meilisearch_types::heed::{EnvClosingEvent, EnvOpenOptions};
 use meilisearch_types::milli::Index;
 use time::OffsetDateTime;
@@ -53,6 +54,7 @@ pub struct IndexMap {
 pub struct ClosingIndex {
     uuid: Uuid,
     closing_event: EnvClosingEvent,
+    enable_mdb_writemap: bool,
     map_size: usize,
     generation: usize,
 }
@@ -68,6 +70,7 @@ impl ClosingIndex {
     pub fn wait_timeout(self, timeout: Duration) -> Option<ReopenableIndex> {
         self.closing_event.wait_timeout(timeout).then_some(ReopenableIndex {
             uuid: self.uuid,
+            enable_mdb_writemap: self.enable_mdb_writemap,
             map_size: self.map_size,
             generation: self.generation,
         })
@@ -76,6 +79,7 @@ impl ClosingIndex {
 
 pub struct ReopenableIndex {
     uuid: Uuid,
+    enable_mdb_writemap: bool,
     map_size: usize,
     generation: usize,
 }
@@ -103,7 +107,7 @@ impl ReopenableIndex {
                 return Ok(());
             }
             map.unavailable.remove(&self.uuid);
-            map.create(&self.uuid, path, None, self.map_size)?;
+            map.create(&self.uuid, path, None, self.enable_mdb_writemap, self.map_size)?;
         }
         Ok(())
     }
@@ -170,16 +174,17 @@ impl IndexMap {
         uuid: &Uuid,
         path: &Path,
         date: Option<(OffsetDateTime, OffsetDateTime)>,
+        enable_mdb_writemap: bool,
         map_size: usize,
     ) -> Result<Index> {
         if !matches!(self.get_unavailable(uuid), Missing) {
             panic!("Attempt to open an index that was unavailable");
         }
-        let index = create_or_open_index(path, date, map_size)?;
+        let index = create_or_open_index(path, date, enable_mdb_writemap, map_size)?;
         match self.available.insert(*uuid, index.clone()) {
             InsertionOutcome::InsertedNew => (),
             InsertionOutcome::Evicted(evicted_uuid, evicted_index) => {
-                self.close(evicted_uuid, evicted_index, 0);
+                self.close(evicted_uuid, evicted_index, enable_mdb_writemap, 0);
             }
             InsertionOutcome::Replaced(_) => {
                 panic!("Attempt to open an index that was already opened")
@@ -212,17 +217,30 @@ impl IndexMap {
     /// | Closing | Closing |
     /// | Available | Closing |
     ///
-    pub fn close_for_resize(&mut self, uuid: &Uuid, map_size_growth: usize) {
+    pub fn close_for_resize(
+        &mut self,
+        uuid: &Uuid,
+        enable_mdb_writemap: bool,
+        map_size_growth: usize,
+    ) {
         let Some(index) = self.available.remove(uuid) else { return; };
-        self.close(*uuid, index, map_size_growth);
+        self.close(*uuid, index, enable_mdb_writemap, map_size_growth);
     }
 
-    fn close(&mut self, uuid: Uuid, index: Index, map_size_growth: usize) {
+    fn close(
+        &mut self,
+        uuid: Uuid,
+        index: Index,
+        enable_mdb_writemap: bool,
+        map_size_growth: usize,
+    ) {
         let map_size = index.map_size().unwrap_or(DEFAULT_MAP_SIZE) + map_size_growth;
         let closing_event = index.prepare_for_closing();
         let generation = self.next_generation();
-        self.unavailable
-            .insert(uuid, Some(ClosingIndex { uuid, closing_event, map_size, generation }));
+        self.unavailable.insert(
+            uuid,
+            Some(ClosingIndex { uuid, closing_event, enable_mdb_writemap, map_size, generation }),
+        );
     }
 
     /// Attempts to delete and index.
@@ -282,11 +300,15 @@ impl IndexMap {
 fn create_or_open_index(
     path: &Path,
     date: Option<(OffsetDateTime, OffsetDateTime)>,
+    enable_mdb_writemap: bool,
     map_size: usize,
 ) -> Result<Index> {
     let mut options = EnvOpenOptions::new();
     options.map_size(clamp_to_page_size(map_size));
     options.max_readers(1024);
+    if enable_mdb_writemap {
+        unsafe { options.flag(Flags::MdbWriteMap) };
+    }
 
     if let Some((created, updated)) = date {
         Ok(Index::new_with_creation_dates(options, path, created, updated)?)
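For readers unfamiliar with LMDB's `MDB_WRITEMAP`, the change to `create_or_open_index` above is the interesting part: the flag is set through an `unsafe` call because a writeable memory map trades safety for speed (a stray write through the map can corrupt the database), which is why it stays behind an explicit opt-in. A standalone sketch of the same flag-setting pattern, assuming the heed re-exports used in the imports above:

```rust
use std::path::Path;

use meilisearch_types::heed::flags::Flags;
use meilisearch_types::heed::{Env, EnvOpenOptions, Result};

// Hypothetical helper mirroring the flag-setting logic of `create_or_open_index`;
// it is not part of the diff.
fn open_env(path: &Path, enable_mdb_writemap: bool, map_size: usize) -> Result<Env> {
    let mut options = EnvOpenOptions::new();
    options.map_size(map_size);
    options.max_readers(1024);
    if enable_mdb_writemap {
        // Same call as in the hunk above; unsafe because WRITEMAP removes the
        // read-only protection of the memory-mapped database pages.
        unsafe { options.flag(Flags::MdbWriteMap) };
    }
    options.open(path)
}
```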
@@ -66,6 +66,8 @@ pub struct IndexMapper {
     index_base_map_size: usize,
     /// The quantity by which the map size of an index is incremented upon reopening, in bytes.
     index_growth_amount: usize,
+    /// Whether we open a meilisearch index with the MDB_WRITEMAP option or not.
+    enable_mdb_writemap: bool,
     pub indexer_config: Arc<IndexerConfig>,
 }
 
@@ -88,8 +90,17 @@ pub enum IndexStatus {
 pub struct IndexStats {
     /// Number of documents in the index.
     pub number_of_documents: u64,
-    /// Size of the index' DB, in bytes.
+    /// Size taken up by the index' DB, in bytes.
+    ///
+    /// This includes the size taken by both the used and free pages of the DB, and as the free pages
+    /// are not returned to the disk after a deletion, this number is typically larger than
+    /// `used_database_size` that only includes the size of the used pages.
     pub database_size: u64,
+    /// Size taken by the used pages of the index' DB, in bytes.
+    ///
+    /// As the DB backend does not return to the disk the pages that are not currently used by the DB,
+    /// this value is typically smaller than `database_size`.
+    pub used_database_size: u64,
     /// Association of every field name with the number of times it occurs in the documents.
     pub field_distribution: FieldDistribution,
     /// Creation date of the index.
@@ -105,10 +116,10 @@ impl IndexStats {
     ///
     /// - rtxn: a RO transaction for the index, obtained from `Index::read_txn()`.
     pub fn new(index: &Index, rtxn: &RoTxn) -> Result<Self> {
-        let database_size = index.on_disk_size()?;
         Ok(IndexStats {
             number_of_documents: index.number_of_documents(rtxn)?,
-            database_size,
+            database_size: index.on_disk_size()?,
+            used_database_size: index.used_size()?,
             field_distribution: index.field_distribution(rtxn)?,
             created_at: index.created_at(rtxn)?,
             updated_at: index.updated_at(rtxn)?,
@@ -123,15 +134,22 @@ impl IndexMapper {
         index_base_map_size: usize,
         index_growth_amount: usize,
         index_count: usize,
+        enable_mdb_writemap: bool,
         indexer_config: IndexerConfig,
     ) -> Result<Self> {
+        let mut wtxn = env.write_txn()?;
+        let index_mapping = env.create_database(&mut wtxn, Some(INDEX_MAPPING))?;
+        let index_stats = env.create_database(&mut wtxn, Some(INDEX_STATS))?;
+        wtxn.commit()?;
+
         Ok(Self {
             index_map: Arc::new(RwLock::new(IndexMap::new(index_count))),
-            index_mapping: env.create_database(Some(INDEX_MAPPING))?,
-            index_stats: env.create_database(Some(INDEX_STATS))?,
+            index_mapping,
+            index_stats,
             base_path,
             index_base_map_size,
             index_growth_amount,
+            enable_mdb_writemap,
             indexer_config: Arc::new(indexer_config),
         })
     }
@@ -162,6 +180,7 @@ impl IndexMapper {
                     &uuid,
                     &index_path,
                     date,
+                    self.enable_mdb_writemap,
                     self.index_base_map_size,
                 )?;
 
@@ -273,7 +292,11 @@ impl IndexMapper {
             .ok_or_else(|| Error::IndexNotFound(name.to_string()))?;
 
         // We remove the index from the in-memory index map.
-        self.index_map.write().unwrap().close_for_resize(&uuid, self.index_growth_amount);
+        self.index_map.write().unwrap().close_for_resize(
+            &uuid,
+            self.enable_mdb_writemap,
+            self.index_growth_amount,
+        );
 
         Ok(())
     }
@@ -338,6 +361,7 @@ impl IndexMapper {
                 &uuid,
                 &index_path,
                 None,
+                self.enable_mdb_writemap,
                 self.index_base_map_size,
             )?;
         }
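The new `used_database_size` field makes the relationship between the two size metrics explicit: `database_size` counts used and free pages, while `used_database_size` counts only the used ones. A tiny hypothetical helper (not part of the diff) showing how their difference can be read as the space currently held by free pages:

```rust
// Hypothetical helper over the `IndexStats` struct shown above.
fn free_page_bytes(stats: &IndexStats) -> u64 {
    // Per the doc comments, `database_size` >= `used_database_size`; use
    // saturating_sub to stay safe if the two values were sampled at slightly
    // different moments.
    stats.database_size.saturating_sub(stats.used_database_size)
}
```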
@@ -31,7 +31,7 @@ mod uuid_codec;
 pub type Result<T> = std::result::Result<T, Error>;
 pub type TaskId = u32;
 
-use std::collections::HashMap;
+use std::collections::{BTreeMap, HashMap};
 use std::ops::{Bound, RangeBounds};
 use std::path::{Path, PathBuf};
 use std::sync::atomic::AtomicBool;
@@ -233,6 +233,8 @@ pub struct IndexSchedulerOptions {
 pub task_db_size: usize,
 /// The size, in bytes, with which a meilisearch index is opened the first time of each meilisearch index.
 pub index_base_map_size: usize,
+/// Whether we open a meilisearch index with the MDB_WRITEMAP option or not.
+pub enable_mdb_writemap: bool,
 /// The size, in bytes, by which the map size of an index is increased when it resized due to being full.
 pub index_growth_amount: usize,
 /// The number of indexes that can be concurrently opened in memory.
@@ -374,6 +376,11 @@ impl IndexScheduler {
 std::fs::create_dir_all(&options.indexes_path)?;
 std::fs::create_dir_all(&options.dumps_path)?;
 
+if cfg!(windows) && options.enable_mdb_writemap {
+// programmer error if this happens: in normal use passing the option on Windows is an error in main
+panic!("Windows doesn't support the MDB_WRITEMAP LMDB option");
+}
+
 let task_db_size = clamp_to_page_size(options.task_db_size);
 let budget = if options.indexer_config.skip_index_budget {
 IndexBudget {
@@ -396,25 +403,37 @@ impl IndexScheduler {
 .open(options.tasks_path)?;
 let file_store = FileStore::new(&options.update_file_path)?;
 
+let mut wtxn = env.write_txn()?;
+let all_tasks = env.create_database(&mut wtxn, Some(db_name::ALL_TASKS))?;
+let status = env.create_database(&mut wtxn, Some(db_name::STATUS))?;
+let kind = env.create_database(&mut wtxn, Some(db_name::KIND))?;
+let index_tasks = env.create_database(&mut wtxn, Some(db_name::INDEX_TASKS))?;
+let canceled_by = env.create_database(&mut wtxn, Some(db_name::CANCELED_BY))?;
+let enqueued_at = env.create_database(&mut wtxn, Some(db_name::ENQUEUED_AT))?;
+let started_at = env.create_database(&mut wtxn, Some(db_name::STARTED_AT))?;
+let finished_at = env.create_database(&mut wtxn, Some(db_name::FINISHED_AT))?;
+wtxn.commit()?;
+
 // allow unreachable_code to get rids of the warning in the case of a test build.
 let this = Self {
 must_stop_processing: MustStopProcessing::default(),
 processing_tasks: Arc::new(RwLock::new(ProcessingTasks::new())),
 file_store,
-all_tasks: env.create_database(Some(db_name::ALL_TASKS))?,
-status: env.create_database(Some(db_name::STATUS))?,
-kind: env.create_database(Some(db_name::KIND))?,
-index_tasks: env.create_database(Some(db_name::INDEX_TASKS))?,
-canceled_by: env.create_database(Some(db_name::CANCELED_BY))?,
-enqueued_at: env.create_database(Some(db_name::ENQUEUED_AT))?,
-started_at: env.create_database(Some(db_name::STARTED_AT))?,
-finished_at: env.create_database(Some(db_name::FINISHED_AT))?,
+all_tasks,
+status,
+kind,
+index_tasks,
+canceled_by,
+enqueued_at,
+started_at,
+finished_at,
 index_mapper: IndexMapper::new(
 &env,
 options.indexes_path,
 budget.map_size,
 options.index_growth_amount,
 budget.index_count,
+options.enable_mdb_writemap,
 options.indexer_config,
 )?,
 env,
@@ -554,10 +573,16 @@ impl IndexScheduler {
 &self.index_mapper.indexer_config
 }
 
+/// Return the real database size (i.e.: The size **with** the free pages)
 pub fn size(&self) -> Result<u64> {
 Ok(self.env.real_disk_size()?)
 }
+
+/// Return the used database size (i.e.: The size **without** the free pages)
+pub fn used_size(&self) -> Result<u64> {
+Ok(self.env.non_free_pages_size()?)
+}
 
 /// Return the index corresponding to the name.
 ///
 /// * If the index wasn't opened before, the index will be opened.
@@ -737,6 +762,38 @@ impl IndexScheduler {
 Ok(tasks)
 }
 
+/// The returned structure contains:
+/// 1. The name of the property being observed can be `statuses`, `types`, or `indexes`.
+/// 2. The name of the specific data related to the property can be `enqueued` for the `statuses`, `settingsUpdate` for the `types`, or the name of the index for the `indexes`, for example.
+/// 3. The number of times the properties appeared.
+pub fn get_stats(&self) -> Result<BTreeMap<String, BTreeMap<String, u64>>> {
+let rtxn = self.read_txn()?;
+
+let mut res = BTreeMap::new();
+
+res.insert(
+"statuses".to_string(),
+enum_iterator::all::<Status>()
+.map(|s| Ok((s.to_string(), self.get_status(&rtxn, s)?.len())))
+.collect::<Result<BTreeMap<String, u64>>>()?,
+);
+res.insert(
+"types".to_string(),
+enum_iterator::all::<Kind>()
+.map(|s| Ok((s.to_string(), self.get_kind(&rtxn, s)?.len())))
+.collect::<Result<BTreeMap<String, u64>>>()?,
+);
+res.insert(
+"indexes".to_string(),
+self.index_tasks
+.iter(&rtxn)?
+.map(|res| Ok(res.map(|(name, bitmap)| (name.to_string(), bitmap.len()))?))
+.collect::<Result<BTreeMap<String, u64>>>()?,
+);
+
+Ok(res)
+}
+
 /// Return true iff there is at least one task associated with this index
 /// that is processing.
 pub fn is_index_processing(&self, index: &str) -> Result<bool> {
@@ -1471,6 +1528,7 @@ mod tests {
 dumps_path: tempdir.path().join("dumps"),
 task_db_size: 1000 * 1000, // 1 MB, we don't use MiB on purpose.
 index_base_map_size: 1000 * 1000, // 1 MB, we don't use MiB on purpose.
+enable_mdb_writemap: false,
 index_growth_amount: 1000 * 1000, // 1 MB
 index_count: 5,
 indexer_config,
@@ -466,7 +466,7 @@ impl IndexScheduler {
 }
 }
 Details::DocumentDeletionByFilter { deleted_documents, original_filter: _ } => {
-assert_eq!(kind.as_kind(), Kind::DocumentDeletionByFilter);
+assert_eq!(kind.as_kind(), Kind::DocumentDeletion);
 let (index_uid, _) = if let KindWithContent::DocumentDeletionByFilter {
 ref index_uid,
 ref filter_expr,
@@ -45,6 +45,11 @@ impl AuthController {
 self.store.size()
 }
 
+/// Return the used size of the `AuthController` database in bytes.
+pub fn used_size(&self) -> Result<u64> {
+self.store.used_size()
+}
+
 pub fn create_key(&self, create_key: CreateApiKey) -> Result<Key> {
 match self.store.get_api_key(create_key.uid)? {
 Some(_) => Err(AuthControllerError::ApiKeyAlreadyExists(create_key.uid.to_string())),
@@ -55,9 +55,11 @@ impl HeedAuthStore {
 let path = path.as_ref().join(AUTH_DB_PATH);
 create_dir_all(&path)?;
 let env = Arc::new(open_auth_store_env(path.as_ref())?);
-let keys = env.create_database(Some(KEY_DB_NAME))?;
+let mut wtxn = env.write_txn()?;
+let keys = env.create_database(&mut wtxn, Some(KEY_DB_NAME))?;
 let action_keyid_index_expiration =
-env.create_database(Some(KEY_ID_ACTION_INDEX_EXPIRATION_DB_NAME))?;
+env.create_database(&mut wtxn, Some(KEY_ID_ACTION_INDEX_EXPIRATION_DB_NAME))?;
+wtxn.commit()?;
 Ok(Self { env, keys, action_keyid_index_expiration, should_close_on_drop: true })
 }
 
@@ -73,6 +75,11 @@ impl HeedAuthStore {
 Ok(self.env.real_disk_size()?)
 }
 
+/// Return the number of bytes actually used in the database
+pub fn used_size(&self) -> Result<u64> {
+Ok(self.env.non_free_pages_size()?)
+}
+
 pub fn set_drop_on_close(&mut self, v: bool) {
 self.should_close_on_drop = v;
 }
@@ -150,6 +150,7 @@ make_missing_field_convenience_builder!(MissingApiKeyActions, missing_api_key_ac
 make_missing_field_convenience_builder!(MissingApiKeyExpiresAt, missing_api_key_expires_at);
 make_missing_field_convenience_builder!(MissingApiKeyIndexes, missing_api_key_indexes);
 make_missing_field_convenience_builder!(MissingSwapIndexes, missing_swap_indexes);
+make_missing_field_convenience_builder!(MissingDocumentFilter, missing_document_filter);
 
 // Integrate a sub-error into a [`DeserrError`] by taking its error message but using
 // the default error code (C) from `Self`
@@ -214,12 +214,12 @@ InvalidApiKeyUid , InvalidRequest , BAD_REQUEST ;
 InvalidContentType , InvalidRequest , UNSUPPORTED_MEDIA_TYPE ;
 InvalidDocumentCsvDelimiter , InvalidRequest , BAD_REQUEST ;
 InvalidDocumentFields , InvalidRequest , BAD_REQUEST ;
+MissingDocumentFilter , InvalidRequest , BAD_REQUEST ;
 InvalidDocumentFilter , InvalidRequest , BAD_REQUEST ;
 InvalidDocumentGeoField , InvalidRequest , BAD_REQUEST ;
 InvalidDocumentId , InvalidRequest , BAD_REQUEST ;
 InvalidDocumentLimit , InvalidRequest , BAD_REQUEST ;
 InvalidDocumentOffset , InvalidRequest , BAD_REQUEST ;
-InvalidDocumentDeleteFilter , InvalidRequest , BAD_REQUEST ;
 InvalidIndexLimit , InvalidRequest , BAD_REQUEST ;
 InvalidIndexOffset , InvalidRequest , BAD_REQUEST ;
 InvalidIndexPrimaryKey , InvalidRequest , BAD_REQUEST ;
@@ -395,7 +395,6 @@ impl std::error::Error for ParseTaskStatusError {}
 pub enum Kind {
 DocumentAdditionOrUpdate,
 DocumentDeletion,
-DocumentDeletionByFilter,
 SettingsUpdate,
 IndexCreation,
 IndexDeletion,
@@ -412,7 +411,6 @@ impl Kind {
 match self {
 Kind::DocumentAdditionOrUpdate
 | Kind::DocumentDeletion
-| Kind::DocumentDeletionByFilter
 | Kind::SettingsUpdate
 | Kind::IndexCreation
 | Kind::IndexDeletion
@@ -430,7 +428,6 @@ impl Display for Kind {
 match self {
 Kind::DocumentAdditionOrUpdate => write!(f, "documentAdditionOrUpdate"),
 Kind::DocumentDeletion => write!(f, "documentDeletion"),
-Kind::DocumentDeletionByFilter => write!(f, "documentDeletionByFilter"),
 Kind::SettingsUpdate => write!(f, "settingsUpdate"),
 Kind::IndexCreation => write!(f, "indexCreation"),
 Kind::IndexDeletion => write!(f, "indexDeletion"),
@@ -5,7 +5,7 @@ use actix_web::HttpRequest;
 use meilisearch_types::InstanceUid;
 use serde_json::Value;
 
-use super::{find_user_id, Analytics, DocumentDeletionKind};
+use super::{find_user_id, Analytics, DocumentDeletionKind, DocumentFetchKind};
 use crate::routes::indexes::documents::UpdateDocumentsQuery;
 use crate::routes::tasks::TasksFilterQuery;
 use crate::Opt;
@@ -71,6 +71,8 @@ impl Analytics for MockAnalytics {
 _request: &HttpRequest,
 ) {
 }
+fn get_fetch_documents(&self, _documents_query: &DocumentFetchKind, _request: &HttpRequest) {}
+fn post_fetch_documents(&self, _documents_query: &DocumentFetchKind, _request: &HttpRequest) {}
 fn get_tasks(&self, _query: &TasksFilterQuery, _request: &HttpRequest) {}
 fn health_seen(&self, _request: &HttpRequest) {}
 }
@@ -67,6 +67,12 @@ pub enum DocumentDeletionKind {
 PerFilter,
 }
 
+#[derive(Copy, Clone, Debug, PartialEq, Eq)]
+pub enum DocumentFetchKind {
+PerDocumentId,
+Normal { with_filter: bool, limit: usize, offset: usize },
+}
+
 pub trait Analytics: Sync + Send {
 fn instance_uid(&self) -> Option<&InstanceUid>;
 
@@ -90,6 +96,12 @@ pub trait Analytics: Sync + Send {
 request: &HttpRequest,
 );
 
+// this method should be called to aggregate a fetch documents request
+fn get_fetch_documents(&self, documents_query: &DocumentFetchKind, request: &HttpRequest);
+
+// this method should be called to aggregate a fetch documents request
+fn post_fetch_documents(&self, documents_query: &DocumentFetchKind, request: &HttpRequest);
+
 // this method should be called to aggregate a add documents request
 fn delete_documents(&self, kind: DocumentDeletionKind, request: &HttpRequest);
 
@@ -23,7 +23,9 @@ use tokio::select;
 use tokio::sync::mpsc::{self, Receiver, Sender};
 use uuid::Uuid;
 
-use super::{config_user_id_path, DocumentDeletionKind, MEILISEARCH_CONFIG_PATH};
+use super::{
+config_user_id_path, DocumentDeletionKind, DocumentFetchKind, MEILISEARCH_CONFIG_PATH,
+};
 use crate::analytics::Analytics;
 use crate::option::{default_http_addr, IndexerOpts, MaxMemory, MaxThreads, ScheduleSnapshot};
 use crate::routes::indexes::documents::UpdateDocumentsQuery;
@@ -72,6 +74,8 @@ pub enum AnalyticsMsg {
 AggregateAddDocuments(DocumentsAggregator),
 AggregateDeleteDocuments(DocumentsDeletionAggregator),
 AggregateUpdateDocuments(DocumentsAggregator),
+AggregateGetFetchDocuments(DocumentsFetchAggregator),
+AggregatePostFetchDocuments(DocumentsFetchAggregator),
 AggregateTasks(TasksAggregator),
 AggregateHealth(HealthAggregator),
 }
@@ -139,6 +143,8 @@ impl SegmentAnalytics {
 add_documents_aggregator: DocumentsAggregator::default(),
 delete_documents_aggregator: DocumentsDeletionAggregator::default(),
 update_documents_aggregator: DocumentsAggregator::default(),
+get_fetch_documents_aggregator: DocumentsFetchAggregator::default(),
+post_fetch_documents_aggregator: DocumentsFetchAggregator::default(),
 get_tasks_aggregator: TasksAggregator::default(),
 health_aggregator: HealthAggregator::default(),
 });
@@ -205,6 +211,16 @@ impl super::Analytics for SegmentAnalytics {
 let _ = self.sender.try_send(AnalyticsMsg::AggregateUpdateDocuments(aggregate));
 }
 
+fn get_fetch_documents(&self, documents_query: &DocumentFetchKind, request: &HttpRequest) {
+let aggregate = DocumentsFetchAggregator::from_query(documents_query, request);
+let _ = self.sender.try_send(AnalyticsMsg::AggregateGetFetchDocuments(aggregate));
+}
+
+fn post_fetch_documents(&self, documents_query: &DocumentFetchKind, request: &HttpRequest) {
+let aggregate = DocumentsFetchAggregator::from_query(documents_query, request);
+let _ = self.sender.try_send(AnalyticsMsg::AggregatePostFetchDocuments(aggregate));
+}
+
 fn get_tasks(&self, query: &TasksFilterQuery, request: &HttpRequest) {
 let aggregate = TasksAggregator::from_query(query, request);
 let _ = self.sender.try_send(AnalyticsMsg::AggregateTasks(aggregate));
@@ -225,6 +241,7 @@ impl super::Analytics for SegmentAnalytics {
 struct Infos {
 env: String,
 experimental_enable_metrics: bool,
+experimental_reduce_indexing_memory_usage: bool,
 db_path: bool,
 import_dump: bool,
 dump_dir: bool,
@@ -258,6 +275,7 @@ impl From<Opt> for Infos {
 let Opt {
 db_path,
 experimental_enable_metrics,
+experimental_reduce_indexing_memory_usage,
 http_addr,
 master_key: _,
 env,
@@ -300,6 +318,7 @@ impl From<Opt> for Infos {
 Self {
 env,
 experimental_enable_metrics,
+experimental_reduce_indexing_memory_usage,
 db_path: db_path != PathBuf::from("./data.ms"),
 import_dump: import_dump.is_some(),
 dump_dir: dump_dir != PathBuf::from("dumps/"),
@@ -338,6 +357,8 @@ pub struct Segment {
 add_documents_aggregator: DocumentsAggregator,
 delete_documents_aggregator: DocumentsDeletionAggregator,
 update_documents_aggregator: DocumentsAggregator,
+get_fetch_documents_aggregator: DocumentsFetchAggregator,
+post_fetch_documents_aggregator: DocumentsFetchAggregator,
 get_tasks_aggregator: TasksAggregator,
 health_aggregator: HealthAggregator,
 }
@@ -400,6 +421,8 @@ impl Segment {
 Some(AnalyticsMsg::AggregateAddDocuments(agreg)) => self.add_documents_aggregator.aggregate(agreg),
 Some(AnalyticsMsg::AggregateDeleteDocuments(agreg)) => self.delete_documents_aggregator.aggregate(agreg),
 Some(AnalyticsMsg::AggregateUpdateDocuments(agreg)) => self.update_documents_aggregator.aggregate(agreg),
+Some(AnalyticsMsg::AggregateGetFetchDocuments(agreg)) => self.get_fetch_documents_aggregator.aggregate(agreg),
+Some(AnalyticsMsg::AggregatePostFetchDocuments(agreg)) => self.post_fetch_documents_aggregator.aggregate(agreg),
 Some(AnalyticsMsg::AggregateTasks(agreg)) => self.get_tasks_aggregator.aggregate(agreg),
 Some(AnalyticsMsg::AggregateHealth(agreg)) => self.health_aggregator.aggregate(agreg),
 None => (),
@@ -450,6 +473,10 @@ impl Segment {
 .into_event(&self.user, "Documents Deleted");
 let update_documents = std::mem::take(&mut self.update_documents_aggregator)
 .into_event(&self.user, "Documents Updated");
+let get_fetch_documents = std::mem::take(&mut self.get_fetch_documents_aggregator)
+.into_event(&self.user, "Documents Fetched GET");
+let post_fetch_documents = std::mem::take(&mut self.post_fetch_documents_aggregator)
+.into_event(&self.user, "Documents Fetched POST");
 let get_tasks =
 std::mem::take(&mut self.get_tasks_aggregator).into_event(&self.user, "Tasks Seen");
 let health =
@@ -473,6 +500,12 @@ impl Segment {
 if let Some(update_documents) = update_documents {
 let _ = self.batcher.push(update_documents).await;
 }
+if let Some(get_fetch_documents) = get_fetch_documents {
+let _ = self.batcher.push(get_fetch_documents).await;
+}
+if let Some(post_fetch_documents) = post_fetch_documents {
+let _ = self.batcher.push(post_fetch_documents).await;
+}
 if let Some(get_tasks) = get_tasks {
 let _ = self.batcher.push(get_tasks).await;
 }
@@ -1135,3 +1168,76 @@ impl HealthAggregator {
 })
 }
 }
+
+#[derive(Default, Serialize)]
+pub struct DocumentsFetchAggregator {
+#[serde(skip)]
+timestamp: Option<OffsetDateTime>,
+
+// context
+#[serde(rename = "user-agent")]
+user_agents: HashSet<String>,
+
+#[serde(rename = "requests.max_limit")]
+total_received: usize,
+
+// a call on ../documents/:doc_id
+per_document_id: bool,
+// if a filter was used
+per_filter: bool,
+
+// pagination
+#[serde(rename = "pagination.max_limit")]
+max_limit: usize,
+#[serde(rename = "pagination.max_offset")]
+max_offset: usize,
+}
+
+impl DocumentsFetchAggregator {
+pub fn from_query(query: &DocumentFetchKind, request: &HttpRequest) -> Self {
+let (limit, offset) = match query {
+DocumentFetchKind::PerDocumentId => (1, 0),
+DocumentFetchKind::Normal { limit, offset, .. } => (*limit, *offset),
+};
+Self {
+timestamp: Some(OffsetDateTime::now_utc()),
+user_agents: extract_user_agents(request).into_iter().collect(),
+total_received: 1,
+per_document_id: matches!(query, DocumentFetchKind::PerDocumentId),
+per_filter: matches!(query, DocumentFetchKind::Normal { with_filter, .. } if *with_filter),
+max_limit: limit,
+max_offset: offset,
+}
+}
+
+/// Aggregate one [DocumentsFetchAggregator] into another.
+pub fn aggregate(&mut self, other: Self) {
+if self.timestamp.is_none() {
+self.timestamp = other.timestamp;
+}
+for user_agent in other.user_agents {
+self.user_agents.insert(user_agent);
+}
+
+self.total_received = self.total_received.saturating_add(other.total_received);
+self.per_document_id |= other.per_document_id;
+self.per_filter |= other.per_filter;
+
+self.max_limit = self.max_limit.max(other.max_limit);
+self.max_offset = self.max_offset.max(other.max_offset);
+}
+
+pub fn into_event(self, user: &User, event_name: &str) -> Option<Track> {
+// if we had no timestamp it means we never encountered any events and
+// thus we don't need to send this event.
+let timestamp = self.timestamp?;
+
+Some(Track {
+timestamp: Some(timestamp),
+user: user.clone(),
+event: event_name.to_string(),
+properties: serde_json::to_value(self).ok()?,
+..Default::default()
+})
+}
+}
@@ -1,5 +1,6 @@
 use actix_web as aweb;
 use aweb::error::{JsonPayloadError, QueryPayloadError};
+use byte_unit::Byte;
 use meilisearch_types::document_formats::{DocumentFormatError, PayloadType};
 use meilisearch_types::error::{Code, ErrorCode, ResponseError};
 use meilisearch_types::index_uid::{IndexUid, IndexUidFormatError};
@@ -26,8 +27,8 @@ pub enum MeilisearchHttpError {
 InvalidExpression(&'static [&'static str], Value),
 #[error("A {0} payload is missing.")]
 MissingPayload(PayloadType),
-#[error("The provided payload reached the size limit.")]
-PayloadTooLarge,
+#[error("The provided payload reached the size limit. The maximum accepted payload size is {}.", Byte::from_bytes(*.0 as u64).get_appropriate_unit(true))]
+PayloadTooLarge(usize),
 #[error("Two indexes must be given for each swap. The list `[{}]` contains {} indexes.",
 .0.iter().map(|uid| format!("\"{uid}\"")).collect::<Vec<_>>().join(", "), .0.len()
 )]
@@ -60,9 +61,9 @@ impl ErrorCode for MeilisearchHttpError {
 MeilisearchHttpError::MissingPayload(_) => Code::MissingPayload,
 MeilisearchHttpError::InvalidContentType(_, _) => Code::InvalidContentType,
 MeilisearchHttpError::DocumentNotFound(_) => Code::DocumentNotFound,
-MeilisearchHttpError::EmptyFilter => Code::InvalidDocumentDeleteFilter,
+MeilisearchHttpError::EmptyFilter => Code::InvalidDocumentFilter,
 MeilisearchHttpError::InvalidExpression(_, _) => Code::InvalidSearchFilter,
-MeilisearchHttpError::PayloadTooLarge => Code::PayloadTooLarge,
+MeilisearchHttpError::PayloadTooLarge(_) => Code::PayloadTooLarge,
 MeilisearchHttpError::SwapIndexPayloadWrongLength(_) => Code::InvalidSwapIndexes,
 MeilisearchHttpError::IndexUid(e) => e.error_code(),
 MeilisearchHttpError::SerdeJson(_) => Code::Internal,
@@ -11,6 +11,7 @@ use crate::error::MeilisearchHttpError;
 pub struct Payload {
 payload: Decompress<dev::Payload>,
 limit: usize,
+remaining: usize,
 }
 
 pub struct PayloadConfig {
@@ -43,6 +44,7 @@ impl FromRequest for Payload {
 ready(Ok(Payload {
 payload: Decompress::from_headers(payload.take(), req.headers()),
 limit,
+remaining: limit,
 }))
 }
 }
@@ -54,12 +56,14 @@ impl Stream for Payload {
 fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
 match Pin::new(&mut self.payload).poll_next(cx) {
 Poll::Ready(Some(result)) => match result {
-Ok(bytes) => match self.limit.checked_sub(bytes.len()) {
+Ok(bytes) => match self.remaining.checked_sub(bytes.len()) {
 Some(new_limit) => {
-self.limit = new_limit;
+self.remaining = new_limit;
 Poll::Ready(Some(Ok(bytes)))
 }
-None => Poll::Ready(Some(Err(MeilisearchHttpError::PayloadTooLarge))),
+None => {
+Poll::Ready(Some(Err(MeilisearchHttpError::PayloadTooLarge(self.limit))))
+}
 },
 x => Poll::Ready(Some(x.map_err(MeilisearchHttpError::from))),
 },
@@ -232,6 +232,7 @@ fn open_or_create_database_unchecked(
 dumps_path: opt.dump_dir.clone(),
 task_db_size: opt.max_task_db_size.get_bytes() as usize,
 index_base_map_size: opt.max_index_size.get_bytes() as usize,
+enable_mdb_writemap: opt.experimental_reduce_indexing_memory_usage,
 indexer_config: (&opt.indexer_options).try_into()?,
 autobatching_enabled: true,
 max_number_of_tasks: 1_000_000,
@@ -29,6 +29,11 @@ fn setup(opt: &Opt) -> anyhow::Result<()> {
 async fn main() -> anyhow::Result<()> {
 let (opt, config_read_from) = Opt::try_build()?;
 
+anyhow::ensure!(
+!(cfg!(windows) && opt.experimental_reduce_indexing_memory_usage),
+"The `experimental-reduce-indexing-memory-usage` flag is not supported on Windows"
+);
+
 setup(&opt)?;
 
 match (opt.env.as_ref(), &opt.master_key) {
@@ -4,19 +4,31 @@ use prometheus::{
 register_int_gauge_vec, HistogramVec, IntCounterVec, IntGauge, IntGaugeVec,
 };
 
-const HTTP_RESPONSE_TIME_CUSTOM_BUCKETS: &[f64; 14] = &[
-0.0005, 0.0008, 0.00085, 0.0009, 0.00095, 0.001, 0.00105, 0.0011, 0.00115, 0.0012, 0.0015,
-0.002, 0.003, 1.0,
-];
+/// Create evenly distributed buckets
+fn create_buckets() -> [f64; 29] {
+(0..10)
+.chain((10..100).step_by(10))
+.chain((100..=1000).step_by(100))
+.map(|i| i as f64 / 1000.)
+.collect::<Vec<_>>()
+.try_into()
+.unwrap()
+}
 
 lazy_static! {
-pub static ref HTTP_REQUESTS_TOTAL: IntCounterVec = register_int_counter_vec!(
-opts!("http_requests_total", "HTTP requests total"),
+pub static ref HTTP_RESPONSE_TIME_CUSTOM_BUCKETS: [f64; 29] = create_buckets();
+pub static ref MEILISEARCH_HTTP_REQUESTS_TOTAL: IntCounterVec = register_int_counter_vec!(
+opts!("meilisearch_http_requests_total", "Meilisearch HTTP requests total"),
 &["method", "path"]
 )
 .expect("Can't create a metric");
 pub static ref MEILISEARCH_DB_SIZE_BYTES: IntGauge =
-register_int_gauge!(opts!("meilisearch_db_size_bytes", "Meilisearch Db Size In Bytes"))
+register_int_gauge!(opts!("meilisearch_db_size_bytes", "Meilisearch DB Size In Bytes"))
+.expect("Can't create a metric");
+pub static ref MEILISEARCH_USED_DB_SIZE_BYTES: IntGauge = register_int_gauge!(opts!(
+"meilisearch_used_db_size_bytes",
+"Meilisearch Used DB Size In Bytes"
+))
 .expect("Can't create a metric");
 pub static ref MEILISEARCH_INDEX_COUNT: IntGauge =
 register_int_gauge!(opts!("meilisearch_index_count", "Meilisearch Index Count"))
@@ -26,11 +38,16 @@ lazy_static! {
 &["index"]
 )
 .expect("Can't create a metric");
-pub static ref HTTP_RESPONSE_TIME_SECONDS: HistogramVec = register_histogram_vec!(
+pub static ref MEILISEARCH_HTTP_RESPONSE_TIME_SECONDS: HistogramVec = register_histogram_vec!(
 "http_response_time_seconds",
 "HTTP response times",
 &["method", "path"],
 HTTP_RESPONSE_TIME_CUSTOM_BUCKETS.to_vec()
 )
 .expect("Can't create a metric");
+pub static ref MEILISEARCH_NB_TASKS: IntGaugeVec = register_int_gauge_vec!(
+opts!("meilisearch_nb_tasks", "Meilisearch Number of tasks"),
+&["kind", "value"]
+)
+.expect("Can't create a metric");
 }
@@ -52,11 +52,11 @@ where
 if is_registered_resource {
 let request_method = req.method().to_string();
 histogram_timer = Some(
-crate::metrics::HTTP_RESPONSE_TIME_SECONDS
+crate::metrics::MEILISEARCH_HTTP_RESPONSE_TIME_SECONDS
 .with_label_values(&[&request_method, request_path])
 .start_timer(),
 );
-crate::metrics::HTTP_REQUESTS_TOTAL
+crate::metrics::MEILISEARCH_HTTP_REQUESTS_TOTAL
 .with_label_values(&[&request_method, request_path])
 .inc();
 }
@@ -48,6 +48,8 @@ const MEILI_IGNORE_DUMP_IF_DB_EXISTS: &str = "MEILI_IGNORE_DUMP_IF_DB_EXISTS";
 const MEILI_DUMP_DIR: &str = "MEILI_DUMP_DIR";
 const MEILI_LOG_LEVEL: &str = "MEILI_LOG_LEVEL";
 const MEILI_EXPERIMENTAL_ENABLE_METRICS: &str = "MEILI_EXPERIMENTAL_ENABLE_METRICS";
+const MEILI_EXPERIMENTAL_REDUCE_INDEXING_MEMORY_USAGE: &str =
+"MEILI_EXPERIMENTAL_REDUCE_INDEXING_MEMORY_USAGE";
 
 const DEFAULT_CONFIG_FILE_PATH: &str = "./config.toml";
 const DEFAULT_DB_PATH: &str = "./data.ms";
@@ -293,6 +295,11 @@ pub struct Opt {
 #[serde(default)]
 pub experimental_enable_metrics: bool,
 
+/// Experimental RAM reduction during indexing, do not use in production, see: <https://github.com/meilisearch/product/discussions/652>
+#[clap(long, env = MEILI_EXPERIMENTAL_REDUCE_INDEXING_MEMORY_USAGE)]
+#[serde(default)]
+pub experimental_reduce_indexing_memory_usage: bool,
+
 #[serde(flatten)]
 #[clap(flatten)]
 pub indexer_options: IndexerOpts,
@@ -385,6 +392,7 @@ impl Opt {
 #[cfg(all(not(debug_assertions), feature = "analytics"))]
 no_analytics,
 experimental_enable_metrics: enable_metrics_route,
+experimental_reduce_indexing_memory_usage: reduce_indexing_memory_usage,
 } = self;
 export_to_env_if_not_present(MEILI_DB_PATH, db_path);
 export_to_env_if_not_present(MEILI_HTTP_ADDR, http_addr);
@@ -426,6 +434,10 @@ impl Opt {
 MEILI_EXPERIMENTAL_ENABLE_METRICS,
 enable_metrics_route.to_string(),
 );
+export_to_env_if_not_present(
+MEILI_EXPERIMENTAL_REDUCE_INDEXING_MEMORY_USAGE,
+reduce_indexing_memory_usage.to_string(),
+);
 indexer_options.export_to_env();
 }
 
@@ -29,7 +29,7 @@ use tempfile::tempfile;
 use tokio::fs::File;
 use tokio::io::{AsyncSeekExt, AsyncWriteExt, BufWriter};
 
-use crate::analytics::{Analytics, DocumentDeletionKind};
+use crate::analytics::{Analytics, DocumentDeletionKind, DocumentFetchKind};
 use crate::error::MeilisearchHttpError;
 use crate::error::PayloadError::ReceivePayload;
 use crate::extractors::authentication::policies::*;
@@ -97,10 +97,14 @@ pub async fn get_document(
 index_scheduler: GuardedData<ActionPolicy<{ actions::DOCUMENTS_GET }>, Data<IndexScheduler>>,
 document_param: web::Path<DocumentParam>,
 params: AwebQueryParameter<GetDocument, DeserrQueryParamError>,
+req: HttpRequest,
+analytics: web::Data<dyn Analytics>,
 ) -> Result<HttpResponse, ResponseError> {
 let DocumentParam { index_uid, document_id } = document_param.into_inner();
 let index_uid = IndexUid::try_from(index_uid)?;
 
+analytics.get_fetch_documents(&DocumentFetchKind::PerDocumentId, &req);
+
 let GetDocument { fields } = params.into_inner();
 let attributes_to_retrieve = fields.merge_star_and_none();
 
@@ -161,16 +165,31 @@ pub async fn documents_by_query_post(
 index_scheduler: GuardedData<ActionPolicy<{ actions::DOCUMENTS_GET }>, Data<IndexScheduler>>,
 index_uid: web::Path<String>,
 body: AwebJson<BrowseQuery, DeserrJsonError>,
+req: HttpRequest,
+analytics: web::Data<dyn Analytics>,
 ) -> Result<HttpResponse, ResponseError> {
 debug!("called with body: {:?}", body);
 
-documents_by_query(&index_scheduler, index_uid, body.into_inner())
+let body = body.into_inner();
+
+analytics.post_fetch_documents(
+&DocumentFetchKind::Normal {
+with_filter: body.filter.is_some(),
+limit: body.limit,
+offset: body.offset,
+},
+&req,
+);
+
+documents_by_query(&index_scheduler, index_uid, body)
 }
 
 pub async fn get_documents(
 index_scheduler: GuardedData<ActionPolicy<{ actions::DOCUMENTS_GET }>, Data<IndexScheduler>>,
 index_uid: web::Path<String>,
 params: AwebQueryParameter<BrowseQueryGet, DeserrQueryParamError>,
+req: HttpRequest,
+analytics: web::Data<dyn Analytics>,
 ) -> Result<HttpResponse, ResponseError> {
 debug!("called with params: {:?}", params);
 
@@ -191,6 +210,15 @@ pub async fn get_documents(
 filter,
 };
 
+analytics.get_fetch_documents(
+&DocumentFetchKind::Normal {
+with_filter: query.filter.is_some(),
+limit: query.limit,
+offset: query.offset,
+},
+&req,
+);
+
 documents_by_query(&index_scheduler, index_uid, query)
 }
 
@@ -458,7 +486,7 @@ pub async fn delete_documents_batch(
 #[derive(Debug, Deserr)]
 #[deserr(error = DeserrJsonError, rename_all = camelCase, deny_unknown_fields)]
 pub struct DocumentDeletionByFilter {
-#[deserr(error = DeserrJsonError<InvalidDocumentDeleteFilter>)]
+#[deserr(error = DeserrJsonError<InvalidDocumentFilter>, missing_field_error = DeserrJsonError::missing_document_filter)]
 filter: Value,
 }
 
@@ -480,8 +508,8 @@ pub async fn delete_documents_by_filter(
 || -> Result<_, ResponseError> {
 Ok(crate::search::parse_filter(&filter)?.ok_or(MeilisearchHttpError::EmptyFilter)?)
 }()
-// and whatever was the error, the error code should always be an InvalidDocumentDeleteFilter
-.map_err(|err| ResponseError::from_msg(err.message, Code::InvalidDocumentDeleteFilter))?;
+// and whatever was the error, the error code should always be an InvalidDocumentFilter
+.map_err(|err| ResponseError::from_msg(err.message, Code::InvalidDocumentFilter))?;
 let task = KindWithContent::DocumentDeletionByFilter { index_uid, filter_expr: filter };
 
 let task: SummarizedTaskView =
@@ -540,7 +568,12 @@ fn retrieve_documents<S: AsRef<str>>(
 };
 
 let candidates = if let Some(filter) = filter {
-filter.evaluate(&rtxn, index)?
+filter.evaluate(&rtxn, index).map_err(|err| match err {
+milli::Error::UserError(milli::UserError::InvalidFilter(_)) => {
+ResponseError::from_msg(err.to_string(), Code::InvalidDocumentFilter)
+}
+e => e.into(),
+})?
 } else {
 index.documents_ids(&rtxn)?
 };
@@ -17,7 +17,7 @@ pub fn configure(config: &mut web::ServiceConfig) {
 
 pub async fn get_metrics(
 index_scheduler: GuardedData<ActionPolicy<{ actions::METRICS_GET }>, Data<IndexScheduler>>,
-auth_controller: GuardedData<ActionPolicy<{ actions::METRICS_GET }>, Data<AuthController>>,
+auth_controller: Data<AuthController>,
 ) -> Result<HttpResponse, ResponseError> {
 let auth_filters = index_scheduler.filters();
 if !auth_filters.all_indexes_authorized() {
@@ -28,10 +28,10 @@ pub async fn get_metrics(
 return Err(error);
 }
 
-let response =
-create_all_stats((*index_scheduler).clone(), (*auth_controller).clone(), auth_filters)?;
+let response = create_all_stats((*index_scheduler).clone(), auth_controller, auth_filters)?;
 
 crate::metrics::MEILISEARCH_DB_SIZE_BYTES.set(response.database_size as i64);
+crate::metrics::MEILISEARCH_USED_DB_SIZE_BYTES.set(response.used_database_size as i64);
 crate::metrics::MEILISEARCH_INDEX_COUNT.set(response.indexes.len() as i64);
 
 for (index, value) in response.indexes.iter() {
@@ -40,6 +40,14 @@ pub async fn get_metrics(
 .set(value.number_of_documents as i64);
 }
 
+for (kind, value) in index_scheduler.get_stats()? {
+for (value, count) in value {
+crate::metrics::MEILISEARCH_NB_TASKS
+.with_label_values(&[&kind, &value])
+.set(count as i64);
+}
+}
+
 let encoder = TextEncoder::new();
 let mut buffer = vec![];
 encoder.encode(&prometheus::gather(), &mut buffer).expect("Failed to encode metrics");
@@ -231,6 +231,8 @@ pub async fn running() -> HttpResponse {
 #[serde(rename_all = "camelCase")]
 pub struct Stats {
 pub database_size: u64,
+#[serde(skip)]
+pub used_database_size: u64,
 #[serde(serialize_with = "time::serde::rfc3339::option::serialize")]
 pub last_update: Option<OffsetDateTime>,
 pub indexes: BTreeMap<String, indexes::IndexStats>,
@@ -259,6 +261,7 @@ pub fn create_all_stats(
 let mut last_task: Option<OffsetDateTime> = None;
 let mut indexes = BTreeMap::new();
 let mut database_size = 0;
+let mut used_database_size = 0;
 
 for index_uid in index_scheduler.index_names()? {
 // Accumulate the size of all indexes, even unauthorized ones, so
@@ -266,6 +269,7 @@ pub fn create_all_stats(
 // See <https://github.com/meilisearch/meilisearch/pull/3541#discussion_r1126747643> for context.
 let stats = index_scheduler.index_stats(&index_uid)?;
 database_size += stats.inner_stats.database_size;
+used_database_size += stats.inner_stats.used_database_size;
 
 if !filters.is_index_authorized(&index_uid) {
 continue;
@@ -278,10 +282,14 @@ pub fn create_all_stats(
 }
 
 database_size += index_scheduler.size()?;
+used_database_size += index_scheduler.used_size()?;
 database_size += auth_controller.size()?;
-database_size += index_scheduler.compute_update_file_size()?;
+used_database_size += auth_controller.used_size()?;
+let update_file_size = index_scheduler.compute_update_file_size()?;
+database_size += update_file_size;
+used_database_size += update_file_size;
 
-let stats = Stats { database_size, last_update: last_task, indexes };
+let stats = Stats { database_size, used_database_size, last_update: last_task, indexes };
 Ok(stats)
 }
 
@@ -99,7 +99,7 @@ pub struct DetailsView {
 #[serde(skip_serializing_if = "Option::is_none")]
 pub deleted_tasks: Option<Option<u64>>,
 #[serde(skip_serializing_if = "Option::is_none")]
-pub original_filter: Option<String>,
+pub original_filter: Option<Option<String>>,
 #[serde(skip_serializing_if = "Option::is_none")]
 pub dump_uid: Option<Option<String>>,
 #[serde(skip_serializing_if = "Option::is_none")]
@@ -131,12 +131,13 @@ impl From<Details> for DetailsView {
 } => DetailsView {
 provided_ids: Some(received_document_ids),
 deleted_documents: Some(deleted_documents),
+original_filter: Some(None),
 ..DetailsView::default()
 },
 Details::DocumentDeletionByFilter { original_filter, deleted_documents } => {
 DetailsView {
 provided_ids: Some(0),
-original_filter: Some(original_filter),
+original_filter: Some(Some(original_filter)),
 deleted_documents: Some(deleted_documents),
 ..DetailsView::default()
 }
@@ -148,7 +149,7 @@ impl From<Details> for DetailsView {
 DetailsView {
 matched_tasks: Some(matched_tasks),
 canceled_tasks: Some(canceled_tasks),
-original_filter: Some(original_filter),
+original_filter: Some(Some(original_filter)),
 ..DetailsView::default()
 }
 }
@@ -156,7 +157,7 @@ impl From<Details> for DetailsView {
 DetailsView {
 matched_tasks: Some(matched_tasks),
 deleted_tasks: Some(deleted_tasks),
-original_filter: Some(original_filter),
+original_filter: Some(Some(original_filter)),
 ..DetailsView::default()
 }
 }
@@ -729,7 +730,7 @@ mod tests {
 let err = deserr_query_params::<TaskDeletionOrCancelationQuery>(params).unwrap_err();
 snapshot!(meili_snap::json_string!(err), @r###"
 {
-"message": "Invalid value in parameter `types`: `createIndex` is not a valid task type. Available types are `documentAdditionOrUpdate`, `documentDeletion`, `documentDeletionByFilter`, `settingsUpdate`, `indexCreation`, `indexDeletion`, `indexUpdate`, `indexSwap`, `taskCancelation`, `taskDeletion`, `dumpCreation`, `snapshotCreation`.",
+"message": "Invalid value in parameter `types`: `createIndex` is not a valid task type. Available types are `documentAdditionOrUpdate`, `documentDeletion`, `settingsUpdate`, `indexCreation`, `indexDeletion`, `indexUpdate`, `indexSwap`, `taskCancelation`, `taskDeletion`, `dumpCreation`, `snapshotCreation`.",
 "code": "invalid_task_types",
 "type": "invalid_request",
 "link": "https://docs.meilisearch.com/errors#invalid_task_types"
@@ -16,8 +16,11 @@ pub static AUTHORIZATIONS: Lazy<HashMap<(&'static str, &'static str), HashSet<&'
 ("GET", "/indexes/products/search") => hashset!{"search", "*"},
 ("POST", "/indexes/products/documents") => hashset!{"documents.add", "documents.*", "*"},
 ("GET", "/indexes/products/documents") => hashset!{"documents.get", "documents.*", "*"},
+("POST", "/indexes/products/documents/fetch") => hashset!{"documents.get", "documents.*", "*"},
 ("GET", "/indexes/products/documents/0") => hashset!{"documents.get", "documents.*", "*"},
 ("DELETE", "/indexes/products/documents/0") => hashset!{"documents.delete", "documents.*", "*"},
+("POST", "/indexes/products/documents/delete-batch") => hashset!{"documents.delete", "documents.*", "*"},
+("POST", "/indexes/products/documents/delete") => hashset!{"documents.delete", "documents.*", "*"},
 ("GET", "/tasks") => hashset!{"tasks.get", "tasks.*", "*"},
 ("DELETE", "/tasks") => hashset!{"tasks.delete", "tasks.*", "*"},
 ("GET", "/tasks?indexUid=products") => hashset!{"tasks.get", "tasks.*", "*"},
@@ -1781,7 +1781,7 @@ async fn error_add_documents_payload_size() {
 snapshot!(json_string!(response, { ".duration" => "[duration]", ".enqueuedAt" => "[date]", ".startedAt" => "[date]", ".finishedAt" => "[date]" }),
 @r###"
 {
-"message": "The provided payload reached the size limit.",
+"message": "The provided payload reached the size limit. The maximum accepted payload size is 10.00 MiB.",
 "code": "payload_too_large",
 "type": "invalid_request",
 "link": "https://docs.meilisearch.com/errors#payload_too_large"
@@ -180,9 +180,9 @@ async fn get_all_documents_bad_filter() {
 snapshot!(json_string!(response), @r###"
 {
 "message": "Attribute `doggo` is not filterable. This index does not have configured filterable attributes.\n1:6 doggo=bernese",
-"code": "invalid_search_filter",
+"code": "invalid_document_filter",
 "type": "invalid_request",
-"link": "https://docs.meilisearch.com/errors#invalid_search_filter"
+"link": "https://docs.meilisearch.com/errors#invalid_document_filter"
 }
 "###);
 }
@@ -547,9 +547,9 @@ async fn delete_document_by_filter() {
 snapshot!(json_string!(response), @r###"
 {
 "message": "Invalid syntax for the filter parameter: `expected String, Array, found: true`.",
-"code": "invalid_document_delete_filter",
+"code": "invalid_document_filter",
 "type": "invalid_request",
-"link": "https://docs.meilisearch.com/errors#invalid_document_delete_filter"
+"link": "https://docs.meilisearch.com/errors#invalid_document_filter"
 }
 "###);

@@ -559,9 +559,9 @@ async fn delete_document_by_filter() {
 snapshot!(json_string!(response), @r###"
 {
 "message": "Was expecting an operation `=`, `!=`, `>=`, `>`, `<=`, `<`, `IN`, `NOT IN`, `TO`, `EXISTS`, `NOT EXISTS`, `IS NULL`, `IS NOT NULL`, `IS EMPTY`, `IS NOT EMPTY`, `_geoRadius`, or `_geoBoundingBox` at `hello`.\n1:6 hello",
-"code": "invalid_document_delete_filter",
+"code": "invalid_document_filter",
 "type": "invalid_request",
-"link": "https://docs.meilisearch.com/errors#invalid_document_delete_filter"
+"link": "https://docs.meilisearch.com/errors#invalid_document_filter"
 }
 "###);

@@ -571,9 +571,21 @@ async fn delete_document_by_filter() {
 snapshot!(json_string!(response), @r###"
 {
 "message": "Sending an empty filter is forbidden.",
-"code": "invalid_document_delete_filter",
+"code": "invalid_document_filter",
 "type": "invalid_request",
-"link": "https://docs.meilisearch.com/errors#invalid_document_delete_filter"
+"link": "https://docs.meilisearch.com/errors#invalid_document_filter"
+}
+"###);

+// do not send any filter
+let (response, code) = index.delete_document_by_filter(json!({})).await;
+snapshot!(code, @"400 Bad Request");
+snapshot!(json_string!(response), @r###"
+{
+"message": "Missing field `filter`",
+"code": "missing_document_filter",
+"type": "invalid_request",
+"link": "https://docs.meilisearch.com/errors#missing_document_filter"
 }
 "###);

@@ -630,9 +642,9 @@ async fn delete_document_by_filter() {
 },
 "error": {
 "message": "Attribute `doggo` is not filterable. This index does not have configured filterable attributes.\n1:6 doggo = bernese",
-"code": "invalid_search_filter",
+"code": "invalid_document_filter",
 "type": "invalid_request",
-"link": "https://docs.meilisearch.com/errors#invalid_search_filter"
+"link": "https://docs.meilisearch.com/errors#invalid_document_filter"
 },
 "duration": "[duration]",
 "enqueuedAt": "[date]",
@@ -664,9 +676,9 @@ async fn delete_document_by_filter() {
 },
 "error": {
 "message": "Attribute `catto` is not filterable. Available filterable attributes are: `doggo`.\n1:6 catto = jorts",
-"code": "invalid_search_filter",
+"code": "invalid_document_filter",
 "type": "invalid_request",
-"link": "https://docs.meilisearch.com/errors#invalid_search_filter"
+"link": "https://docs.meilisearch.com/errors#invalid_document_filter"
 },
 "duration": "[duration]",
 "enqueuedAt": "[date]",
@@ -748,4 +760,27 @@ async fn fetch_document_by_filter() {
 "link": "https://docs.meilisearch.com/errors#invalid_document_filter"
 }
 "###);

+let (response, code) = index.get_document_by_filter(json!({ "filter": "cool doggo" })).await;
+snapshot!(code, @"400 Bad Request");
+snapshot!(json_string!(response), @r###"
+{
+"message": "Was expecting an operation `=`, `!=`, `>=`, `>`, `<=`, `<`, `IN`, `NOT IN`, `TO`, `EXISTS`, `NOT EXISTS`, `IS NULL`, `IS NOT NULL`, `IS EMPTY`, `IS NOT EMPTY`, `_geoRadius`, or `_geoBoundingBox` at `cool doggo`.\n1:11 cool doggo",
+"code": "invalid_document_filter",
+"type": "invalid_request",
+"link": "https://docs.meilisearch.com/errors#invalid_document_filter"
+}
+"###);

+let (response, code) =
+index.get_document_by_filter(json!({ "filter": "doggo = bernese" })).await;
+snapshot!(code, @"400 Bad Request");
+snapshot!(json_string!(response), @r###"
+{
+"message": "Attribute `doggo` is not filterable. Available filterable attributes are: `color`.\n1:6 doggo = bernese",
+"code": "invalid_document_filter",
+"type": "invalid_request",
+"link": "https://docs.meilisearch.com/errors#invalid_document_filter"
+}
+"###);
 }
@@ -946,7 +946,7 @@ async fn sort_unset_ranking_rule() {
 index.wait_task(1).await;

 let expected_response = json!({
-"message": "The sort ranking rule must be specified in the ranking rules settings to use the sort parameter at search time.",
+"message": "You must specify where `sort` is listed in the rankingRules setting to use the sort parameter at search time.",
 "code": "invalid_search_sort",
 "type": "invalid_request",
 "link": "https://docs.meilisearch.com/errors#invalid_search_sort"
@@ -97,7 +97,7 @@ async fn task_bad_types() {
 snapshot!(code, @"400 Bad Request");
 snapshot!(json_string!(response), @r###"
 {
-"message": "Invalid value in parameter `types`: `doggo` is not a valid task type. Available types are `documentAdditionOrUpdate`, `documentDeletion`, `documentDeletionByFilter`, `settingsUpdate`, `indexCreation`, `indexDeletion`, `indexUpdate`, `indexSwap`, `taskCancelation`, `taskDeletion`, `dumpCreation`, `snapshotCreation`.",
+"message": "Invalid value in parameter `types`: `doggo` is not a valid task type. Available types are `documentAdditionOrUpdate`, `documentDeletion`, `settingsUpdate`, `indexCreation`, `indexDeletion`, `indexUpdate`, `indexSwap`, `taskCancelation`, `taskDeletion`, `dumpCreation`, `snapshotCreation`.",
 "code": "invalid_task_types",
 "type": "invalid_request",
 "link": "https://docs.meilisearch.com/errors#invalid_task_types"
@@ -108,7 +108,7 @@ async fn task_bad_types() {
 snapshot!(code, @"400 Bad Request");
 snapshot!(json_string!(response), @r###"
 {
-"message": "Invalid value in parameter `types`: `doggo` is not a valid task type. Available types are `documentAdditionOrUpdate`, `documentDeletion`, `documentDeletionByFilter`, `settingsUpdate`, `indexCreation`, `indexDeletion`, `indexUpdate`, `indexSwap`, `taskCancelation`, `taskDeletion`, `dumpCreation`, `snapshotCreation`.",
+"message": "Invalid value in parameter `types`: `doggo` is not a valid task type. Available types are `documentAdditionOrUpdate`, `documentDeletion`, `settingsUpdate`, `indexCreation`, `indexDeletion`, `indexUpdate`, `indexSwap`, `taskCancelation`, `taskDeletion`, `dumpCreation`, `snapshotCreation`.",
 "code": "invalid_task_types",
 "type": "invalid_request",
 "link": "https://docs.meilisearch.com/errors#invalid_task_types"
@@ -119,7 +119,7 @@ async fn task_bad_types() {
 snapshot!(code, @"400 Bad Request");
 snapshot!(json_string!(response), @r###"
 {
-"message": "Invalid value in parameter `types`: `doggo` is not a valid task type. Available types are `documentAdditionOrUpdate`, `documentDeletion`, `documentDeletionByFilter`, `settingsUpdate`, `indexCreation`, `indexDeletion`, `indexUpdate`, `indexSwap`, `taskCancelation`, `taskDeletion`, `dumpCreation`, `snapshotCreation`.",
+"message": "Invalid value in parameter `types`: `doggo` is not a valid task type. Available types are `documentAdditionOrUpdate`, `documentDeletion`, `settingsUpdate`, `indexCreation`, `indexDeletion`, `indexUpdate`, `indexSwap`, `taskCancelation`, `taskDeletion`, `dumpCreation`, `snapshotCreation`.",
 "code": "invalid_task_types",
 "type": "invalid_request",
 "link": "https://docs.meilisearch.com/errors#invalid_task_types"
@@ -413,7 +413,7 @@ async fn test_summarized_document_addition_or_update() {
 }

 #[actix_web::test]
-async fn test_summarized_delete_batch() {
+async fn test_summarized_delete_documents_by_batch() {
 let server = Server::new().await;
 let index = server.index("test");
 index.delete_batch(vec![1, 2, 3]).await;
@@ -430,7 +430,8 @@ async fn test_summarized_delete_batch() {
 "canceledBy": null,
 "details": {
 "providedIds": 3,
-"deletedDocuments": 0
+"deletedDocuments": 0,
+"originalFilter": null
 },
 "error": {
 "message": "Index `test` not found.",
@@ -460,7 +461,8 @@ async fn test_summarized_delete_batch() {
 "canceledBy": null,
 "details": {
 "providedIds": 1,
-"deletedDocuments": 0
+"deletedDocuments": 0,
+"originalFilter": null
 },
 "error": null,
 "duration": "[duration]",
@@ -472,7 +474,100 @@ async fn test_summarized_delete_batch() {
 }

 #[actix_web::test]
-async fn test_summarized_delete_document() {
+async fn test_summarized_delete_documents_by_filter() {
+let server = Server::new().await;
+let index = server.index("test");

+index.delete_document_by_filter(json!({ "filter": "doggo = bernese" })).await;
+index.wait_task(0).await;
+let (task, _) = index.get_task(0).await;
+assert_json_snapshot!(task,
+{ ".duration" => "[duration]", ".enqueuedAt" => "[date]", ".startedAt" => "[date]", ".finishedAt" => "[date]" },
+@r###"
+{
+"uid": 0,
+"indexUid": "test",
+"status": "failed",
+"type": "documentDeletion",
+"canceledBy": null,
+"details": {
+"providedIds": 0,
+"deletedDocuments": 0,
+"originalFilter": "\"doggo = bernese\""
+},
+"error": {
+"message": "Index `test` not found.",
+"code": "index_not_found",
+"type": "invalid_request",
+"link": "https://docs.meilisearch.com/errors#index_not_found"
+},
+"duration": "[duration]",
+"enqueuedAt": "[date]",
+"startedAt": "[date]",
+"finishedAt": "[date]"
+}
+"###);

+index.create(None).await;
+index.delete_document_by_filter(json!({ "filter": "doggo = bernese" })).await;
+index.wait_task(2).await;
+let (task, _) = index.get_task(2).await;
+assert_json_snapshot!(task,
+{ ".duration" => "[duration]", ".enqueuedAt" => "[date]", ".startedAt" => "[date]", ".finishedAt" => "[date]" },
+@r###"
+{
+"uid": 2,
+"indexUid": "test",
+"status": "failed",
+"type": "documentDeletion",
+"canceledBy": null,
+"details": {
+"providedIds": 0,
+"deletedDocuments": 0,
+"originalFilter": "\"doggo = bernese\""
+},
+"error": {
+"message": "Attribute `doggo` is not filterable. This index does not have configured filterable attributes.\n1:6 doggo = bernese",
+"code": "invalid_document_filter",
+"type": "invalid_request",
+"link": "https://docs.meilisearch.com/errors#invalid_document_filter"
+},
+"duration": "[duration]",
+"enqueuedAt": "[date]",
+"startedAt": "[date]",
+"finishedAt": "[date]"
+}
+"###);

+index.update_settings(json!({ "filterableAttributes": ["doggo"] })).await;
+index.delete_document_by_filter(json!({ "filter": "doggo = bernese" })).await;
+index.wait_task(4).await;
+let (task, _) = index.get_task(4).await;
+assert_json_snapshot!(task,
+{ ".duration" => "[duration]", ".enqueuedAt" => "[date]", ".startedAt" => "[date]", ".finishedAt" => "[date]" },
+@r###"
+{
+"uid": 4,
+"indexUid": "test",
+"status": "succeeded",
+"type": "documentDeletion",
+"canceledBy": null,
+"details": {
+"providedIds": 0,
+"deletedDocuments": 0,
+"originalFilter": "\"doggo = bernese\""
+},
+"error": null,
+"duration": "[duration]",
+"enqueuedAt": "[date]",
+"startedAt": "[date]",
+"finishedAt": "[date]"
+}
+"###);
+}

+#[actix_web::test]
+async fn test_summarized_delete_document_by_id() {
 let server = Server::new().await;
 let index = server.index("test");
 index.delete_document(1).await;
@@ -489,7 +584,8 @@ async fn test_summarized_delete_document() {
 "canceledBy": null,
 "details": {
 "providedIds": 1,
-"deletedDocuments": 0
+"deletedDocuments": 0,
+"originalFilter": null
 },
 "error": {
 "message": "Index `test` not found.",
@@ -519,7 +615,8 @@ async fn test_summarized_delete_document() {
 "canceledBy": null,
 "details": {
 "providedIds": 1,
-"deletedDocuments": 0
+"deletedDocuments": 0,
+"originalFilter": null
 },
 "error": null,
 "duration": "[duration]",
@@ -25,8 +25,13 @@ flatten-serde-json = { path = "../flatten-serde-json" }
 fst = "0.4.7"
 fxhash = "0.2.1"
 geoutils = "0.5.1"
-grenad = { version = "0.4.4", default-features = false, features = ["tempfile"] }
-heed = { git = "https://github.com/meilisearch/heed", tag = "v0.12.5", default-features = false, features = ["lmdb", "sync-read-txn"] }
+grenad = { version = "0.4.4", default-features = false, features = [
+"tempfile",
+] }
+heed = { git = "https://github.com/meilisearch/heed", tag = "v0.12.6", default-features = false, features = [
+"lmdb",
+"sync-read-txn",
+] }
 json-depth-checker = { path = "../json-depth-checker" }
 levenshtein_automata = { version = "0.2.1", features = ["fst_automaton"] }
 memmap2 = "0.5.10"
@@ -44,7 +49,12 @@ smallvec = "1.10.0"
 smartstring = "1.0.1"
 tempfile = "3.5.0"
 thiserror = "1.0.40"
-time = { version = "0.3.20", features = ["serde-well-known", "formatting", "parsing", "macros"] }
+time = { version = "0.3.20", features = [
+"serde-well-known",
+"formatting",
+"parsing",
+"macros",
+] }
 uuid = { version = "1.3.1", features = ["v4"] }

 filter-parser = { path = "../filter-parser" }
@@ -126,7 +126,7 @@ only composed of alphanumeric characters (a-z A-Z 0-9), hyphens (-) and undersco
 InvalidSortableAttribute { field: String, valid_fields: BTreeSet<String> },
 #[error("{}", HeedError::BadOpenOptions)]
 InvalidLmdbOpenOptions,
-#[error("The sort ranking rule must be specified in the ranking rules settings to use the sort parameter at search time.")]
+#[error("You must specify where `sort` is listed in the rankingRules setting to use the sort parameter at search time.")]
 SortRankingRuleMissing,
 #[error("The database file is in an invalid state.")]
 InvalidStoreFile,
@@ -49,7 +49,7 @@ impl CboRoaringBitmapCodec {
 } else {
 // Otherwise, it means we used the classic RoaringBitmapCodec and
 // that the header takes threshold integers.
-RoaringBitmap::deserialize_from(bytes)
+RoaringBitmap::deserialize_unchecked_from(bytes)
 }
 }

@@ -69,7 +69,7 @@ impl CboRoaringBitmapCodec {
 vec.push(integer);
 }
 } else {
-roaring |= RoaringBitmap::deserialize_from(bytes.as_ref())?;
+roaring |= RoaringBitmap::deserialize_unchecked_from(bytes.as_ref())?;
 }
 }

@@ -8,7 +8,7 @@ impl heed::BytesDecode<'_> for RoaringBitmapCodec {
 type DItem = RoaringBitmap;

 fn bytes_decode(bytes: &[u8]) -> Option<Self::DItem> {
-RoaringBitmap::deserialize_from(bytes).ok()
+RoaringBitmap::deserialize_unchecked_from(bytes).ok()
 }
 }

@@ -21,10 +21,9 @@ use crate::heed_codec::facet::{
 };
 use crate::heed_codec::{ScriptLanguageCodec, StrBEU16Codec, StrRefCodec};
 use crate::{
-default_criteria, BEU32StrCodec, BoRoaringBitmapCodec, CboRoaringBitmapCodec, Criterion,
-DocumentId, ExternalDocumentsIds, FacetDistribution, FieldDistribution, FieldId,
-FieldIdWordCountCodec, GeoPoint, ObkvCodec, Result, RoaringBitmapCodec, RoaringBitmapLenCodec,
-Search, U8StrStrCodec, BEU16, BEU32,
+default_criteria, CboRoaringBitmapCodec, Criterion, DocumentId, ExternalDocumentsIds,
+FacetDistribution, FieldDistribution, FieldId, FieldIdWordCountCodec, GeoPoint, ObkvCodec,
+Result, RoaringBitmapCodec, RoaringBitmapLenCodec, Search, U8StrStrCodec, BEU16, BEU32,
 };

 pub const DEFAULT_MIN_WORD_LEN_ONE_TYPO: u8 = 5;
@@ -111,9 +110,6 @@ pub struct Index {
 /// A prefix of word and all the documents ids containing this prefix, from attributes for which typos are not allowed.
 pub exact_word_prefix_docids: Database<Str, RoaringBitmapCodec>,

-/// Maps a word and a document id (u32) to all the positions where the given word appears.
-pub docid_word_positions: Database<BEU32StrCodec, BoRoaringBitmapCodec>,

 /// Maps the proximity between a pair of words with all the docids where this relation appears.
 pub word_pair_proximity_docids: Database<U8StrStrCodec, CboRoaringBitmapCodec>,
 /// Maps the proximity between a pair of word and prefix with all the docids where this relation appears.
@@ -170,33 +166,45 @@ impl Index {
 unsafe { options.flag(Flags::MdbAlwaysFreePages) };

 let env = options.open(path)?;
-let main = env.create_poly_database(Some(MAIN))?;
-let word_docids = env.create_database(Some(WORD_DOCIDS))?;
-let exact_word_docids = env.create_database(Some(EXACT_WORD_DOCIDS))?;
-let word_prefix_docids = env.create_database(Some(WORD_PREFIX_DOCIDS))?;
-let exact_word_prefix_docids = env.create_database(Some(EXACT_WORD_PREFIX_DOCIDS))?;
-let docid_word_positions = env.create_database(Some(DOCID_WORD_POSITIONS))?;
-let word_pair_proximity_docids = env.create_database(Some(WORD_PAIR_PROXIMITY_DOCIDS))?;
-let script_language_docids = env.create_database(Some(SCRIPT_LANGUAGE_DOCIDS))?;
+let mut wtxn = env.write_txn()?;
+let main = env.create_poly_database(&mut wtxn, Some(MAIN))?;
+let word_docids = env.create_database(&mut wtxn, Some(WORD_DOCIDS))?;
+let exact_word_docids = env.create_database(&mut wtxn, Some(EXACT_WORD_DOCIDS))?;
+let word_prefix_docids = env.create_database(&mut wtxn, Some(WORD_PREFIX_DOCIDS))?;
+let exact_word_prefix_docids =
+env.create_database(&mut wtxn, Some(EXACT_WORD_PREFIX_DOCIDS))?;
+let word_pair_proximity_docids =
+env.create_database(&mut wtxn, Some(WORD_PAIR_PROXIMITY_DOCIDS))?;
+let script_language_docids =
+env.create_database(&mut wtxn, Some(SCRIPT_LANGUAGE_DOCIDS))?;
 let word_prefix_pair_proximity_docids =
-env.create_database(Some(WORD_PREFIX_PAIR_PROXIMITY_DOCIDS))?;
+env.create_database(&mut wtxn, Some(WORD_PREFIX_PAIR_PROXIMITY_DOCIDS))?;
 let prefix_word_pair_proximity_docids =
-env.create_database(Some(PREFIX_WORD_PAIR_PROXIMITY_DOCIDS))?;
-let word_position_docids = env.create_database(Some(WORD_POSITION_DOCIDS))?;
-let word_fid_docids = env.create_database(Some(WORD_FIELD_ID_DOCIDS))?;
-let field_id_word_count_docids = env.create_database(Some(FIELD_ID_WORD_COUNT_DOCIDS))?;
-let word_prefix_position_docids = env.create_database(Some(WORD_PREFIX_POSITION_DOCIDS))?;
-let word_prefix_fid_docids = env.create_database(Some(WORD_PREFIX_FIELD_ID_DOCIDS))?;
-let facet_id_f64_docids = env.create_database(Some(FACET_ID_F64_DOCIDS))?;
-let facet_id_string_docids = env.create_database(Some(FACET_ID_STRING_DOCIDS))?;
-let facet_id_exists_docids = env.create_database(Some(FACET_ID_EXISTS_DOCIDS))?;
-let facet_id_is_null_docids = env.create_database(Some(FACET_ID_IS_NULL_DOCIDS))?;
-let facet_id_is_empty_docids = env.create_database(Some(FACET_ID_IS_EMPTY_DOCIDS))?;
+env.create_database(&mut wtxn, Some(PREFIX_WORD_PAIR_PROXIMITY_DOCIDS))?;
+let word_position_docids = env.create_database(&mut wtxn, Some(WORD_POSITION_DOCIDS))?;
+let word_fid_docids = env.create_database(&mut wtxn, Some(WORD_FIELD_ID_DOCIDS))?;
+let field_id_word_count_docids =
+env.create_database(&mut wtxn, Some(FIELD_ID_WORD_COUNT_DOCIDS))?;
+let word_prefix_position_docids =
+env.create_database(&mut wtxn, Some(WORD_PREFIX_POSITION_DOCIDS))?;
+let word_prefix_fid_docids =
+env.create_database(&mut wtxn, Some(WORD_PREFIX_FIELD_ID_DOCIDS))?;
+let facet_id_f64_docids = env.create_database(&mut wtxn, Some(FACET_ID_F64_DOCIDS))?;
+let facet_id_string_docids =
+env.create_database(&mut wtxn, Some(FACET_ID_STRING_DOCIDS))?;
+let facet_id_exists_docids =
+env.create_database(&mut wtxn, Some(FACET_ID_EXISTS_DOCIDS))?;
+let facet_id_is_null_docids =
+env.create_database(&mut wtxn, Some(FACET_ID_IS_NULL_DOCIDS))?;
+let facet_id_is_empty_docids =
+env.create_database(&mut wtxn, Some(FACET_ID_IS_EMPTY_DOCIDS))?;

-let field_id_docid_facet_f64s = env.create_database(Some(FIELD_ID_DOCID_FACET_F64S))?;
+let field_id_docid_facet_f64s =
+env.create_database(&mut wtxn, Some(FIELD_ID_DOCID_FACET_F64S))?;
 let field_id_docid_facet_strings =
-env.create_database(Some(FIELD_ID_DOCID_FACET_STRINGS))?;
-let documents = env.create_database(Some(DOCUMENTS))?;
+env.create_database(&mut wtxn, Some(FIELD_ID_DOCID_FACET_STRINGS))?;
+let documents = env.create_database(&mut wtxn, Some(DOCUMENTS))?;
+wtxn.commit()?;

 Index::set_creation_dates(&env, main, created_at, updated_at)?;

@@ -207,7 +215,6 @@ impl Index {
 exact_word_docids,
 word_prefix_docids,
 exact_word_prefix_docids,
-docid_word_positions,
 word_pair_proximity_docids,
 script_language_docids,
 word_prefix_pair_proximity_docids,
@@ -5,52 +5,6 @@
 #[global_allocator]
 pub static ALLOC: mimalloc::MiMalloc = mimalloc::MiMalloc;

-// #[cfg(test)]
-// pub mod allocator {
-// use std::alloc::{GlobalAlloc, System};
-// use std::sync::atomic::{self, AtomicI64};

-// #[global_allocator]
-// pub static ALLOC: CountingAlloc = CountingAlloc {
-// max_resident: AtomicI64::new(0),
-// resident: AtomicI64::new(0),
-// allocated: AtomicI64::new(0),
-// };

-// pub struct CountingAlloc {
-// pub max_resident: AtomicI64,
-// pub resident: AtomicI64,
-// pub allocated: AtomicI64,
-// }
-// unsafe impl GlobalAlloc for CountingAlloc {
-// unsafe fn alloc(&self, layout: std::alloc::Layout) -> *mut u8 {
-// self.allocated.fetch_add(layout.size() as i64, atomic::Ordering::SeqCst);
-// let old_resident =
-// self.resident.fetch_add(layout.size() as i64, atomic::Ordering::SeqCst);

-// let resident = old_resident + layout.size() as i64;
-// self.max_resident.fetch_max(resident, atomic::Ordering::SeqCst);

-// // if layout.size() > 1_000_000 {
-// // eprintln!(
-// // "allocating {} with new resident size: {resident}",
-// // layout.size() / 1_000_000
-// // );
-// // // let trace = std::backtrace::Backtrace::capture();
-// // // let t = trace.to_string();
-// // // eprintln!("{t}");
-// // }

-// System.alloc(layout)
-// }

-// unsafe fn dealloc(&self, ptr: *mut u8, layout: std::alloc::Layout) {
-// self.resident.fetch_sub(layout.size() as i64, atomic::Ordering::Relaxed);
-// System.dealloc(ptr, layout)
-// }
-// }
-// }

 #[macro_use]
 pub mod documents;

@@ -4,7 +4,6 @@ use std::io::{BufWriter, Write};
 use std::path::{Path, PathBuf};
 use std::time::Instant;

-// use rand::random;
 use roaring::RoaringBitmap;

 use crate::search::new::interner::Interned;
@@ -463,7 +462,7 @@ fill: \"#B6E2D3\"
 shape: class
 max_nbr_typo: {}",
 term_subset.description(ctx),
-term_subset.max_nbr_typos(ctx)
+term_subset.max_typo_cost(ctx)
 )?;

 for w in term_subset.all_single_words_except_prefix_db(ctx)? {
@@ -490,13 +489,6 @@ fill: \"#B6E2D3\"
 }
 Ok(())
 }
-// fn write_words_graph(&mut self, qg: QueryGraph) -> Result<()> {
-// self.make_new_file_for_internal_state_if_needed()?;

-// self.write_query_graph(&qg)?;

-// Ok(())
-// }
 fn write_rr_graph<R: RankingRuleGraphTrait>(
 &mut self,
 graph: &RankingRuleGraph<R>,
@@ -28,16 +28,14 @@ pub enum ZeroOrOneTypo {
 impl Interned<QueryTerm> {
 pub fn compute_fully_if_needed(self, ctx: &mut SearchContext) -> Result<()> {
 let s = ctx.term_interner.get_mut(self);
-if s.max_nbr_typos == 0 {
-s.one_typo = Lazy::Init(OneTypoTerm::default());
-s.two_typo = Lazy::Init(TwoTypoTerm::default());
-} else if s.max_nbr_typos == 1 && s.one_typo.is_uninit() {
+if s.max_levenshtein_distance <= 1 && s.one_typo.is_uninit() {
 assert!(s.two_typo.is_uninit());
+// Initialize one_typo subterm even if max_nbr_typo is 0 because of split words
 self.initialize_one_typo_subterm(ctx)?;
 let s = ctx.term_interner.get_mut(self);
 assert!(s.one_typo.is_init());
 s.two_typo = Lazy::Init(TwoTypoTerm::default());
-} else if s.max_nbr_typos > 1 && s.two_typo.is_uninit() {
+} else if s.max_levenshtein_distance > 1 && s.two_typo.is_uninit() {
 assert!(s.two_typo.is_uninit());
 self.initialize_one_and_two_typo_subterm(ctx)?;
 let s = ctx.term_interner.get_mut(self);
@@ -187,7 +185,7 @@ pub fn partially_initialized_term_from_word(
 original: ctx.word_interner.insert(word.to_owned()),
 ngram_words: None,
 is_prefix: false,
-max_nbr_typos: 0,
+max_levenshtein_distance: 0,
 zero_typo: <_>::default(),
 one_typo: Lazy::Init(<_>::default()),
 two_typo: Lazy::Init(<_>::default()),
@@ -258,7 +256,7 @@ pub fn partially_initialized_term_from_word(
 Ok(QueryTerm {
 original: word_interned,
 ngram_words: None,
-max_nbr_typos: max_typo,
+max_levenshtein_distance: max_typo,
 is_prefix,
 zero_typo,
 one_typo: Lazy::Uninit,
@@ -277,7 +275,16 @@ fn find_split_words(ctx: &mut SearchContext, word: &str) -> Result<Option<Intern
 impl Interned<QueryTerm> {
 fn initialize_one_typo_subterm(self, ctx: &mut SearchContext) -> Result<()> {
 let self_mut = ctx.term_interner.get_mut(self);
-let QueryTerm { original, is_prefix, one_typo, .. } = self_mut;
+let allows_split_words = self_mut.allows_split_words();
+let QueryTerm {
+original,
+is_prefix,
+one_typo,
+max_levenshtein_distance: max_nbr_typos,
+..
+} = self_mut;

 let original = *original;
 let is_prefix = *is_prefix;
 // let original_str = ctx.word_interner.get(*original).to_owned();
@@ -286,6 +293,7 @@ impl Interned<QueryTerm> {
 }
 let mut one_typo_words = BTreeSet::new();

+if *max_nbr_typos > 0 {
 find_zero_one_typo_derivations(ctx, original, is_prefix, |derived_word, nbr_typos| {
 match nbr_typos {
 ZeroOrOneTypo::Zero => {}
@@ -299,13 +307,19 @@ impl Interned<QueryTerm> {
 }
 Ok(ControlFlow::Continue(()))
 })?;
+}

+let split_words = if allows_split_words {
 let original_str = ctx.word_interner.get(original).to_owned();
-let split_words = find_split_words(ctx, original_str.as_str())?;
+find_split_words(ctx, original_str.as_str())?
+} else {
+None
+};

 let self_mut = ctx.term_interner.get_mut(self);

 // Only add the split words to the derivations if:
-// 1. the term is not an ngram; OR
+// 1. the term is neither an ngram nor a phrase; OR
 // 2. the term is an ngram, but the split words are different from the ngram's component words
 let split_words = if let Some((ngram_words, split_words)) =
 self_mut.ngram_words.as_ref().zip(split_words.as_ref())
@@ -327,7 +341,13 @@ impl Interned<QueryTerm> {
 }
 fn initialize_one_and_two_typo_subterm(self, ctx: &mut SearchContext) -> Result<()> {
 let self_mut = ctx.term_interner.get_mut(self);
-let QueryTerm { original, is_prefix, two_typo, .. } = self_mut;
+let QueryTerm {
+original,
+is_prefix,
+two_typo,
+max_levenshtein_distance: max_nbr_typos,
+..
+} = self_mut;
 let original_str = ctx.word_interner.get(*original).to_owned();
 if two_typo.is_init() {
 return Ok(());
@@ -335,6 +355,7 @@ impl Interned<QueryTerm> {
 let mut one_typo_words = BTreeSet::new();
 let mut two_typo_words = BTreeSet::new();

+if *max_nbr_typos > 0 {
 find_zero_one_two_typo_derivations(
 *original,
 *is_prefix,
@@ -363,6 +384,8 @@ impl Interned<QueryTerm> {
 Ok(ControlFlow::Continue(()))
 },
 )?;
+}

 let split_words = find_split_words(ctx, original_str.as_str())?;
 let self_mut = ctx.term_interner.get_mut(self);

@@ -43,7 +43,7 @@ pub struct QueryTermSubset {
 pub struct QueryTerm {
 original: Interned<String>,
 ngram_words: Option<Vec<Interned<String>>>,
-max_nbr_typos: u8,
+max_levenshtein_distance: u8,
 is_prefix: bool,
 zero_typo: ZeroTypoTerm,
 // May not be computed yet
@@ -342,10 +342,16 @@ impl QueryTermSubset {
 }
 None
 }
-pub fn max_nbr_typos(&self, ctx: &SearchContext) -> u8 {
+pub fn max_typo_cost(&self, ctx: &SearchContext) -> u8 {
 let t = ctx.term_interner.get(self.original);
-match t.max_nbr_typos {
-0 => 0,
+match t.max_levenshtein_distance {
+0 => {
+if t.allows_split_words() {
+1
+} else {
+0
+}
+}
 1 => {
 if self.one_typo_subset.is_empty() {
 0
@@ -438,6 +444,9 @@ impl QueryTerm {

 self.zero_typo.is_empty() && one_typo.is_empty() && two_typo.is_empty()
 }
+fn allows_split_words(&self) -> bool {
+self.zero_typo.phrase.is_none()
+}
 }

 impl Interned<QueryTerm> {
@@ -77,13 +77,9 @@ pub fn located_query_terms_from_tokens(
 }
 }
 TokenKind::Separator(separator_kind) => {
-match separator_kind {
-SeparatorKind::Hard => {
-position += 1;
-}
-SeparatorKind::Soft => {
-position += 0;
-}
+// add penalty for hard separators
+if let SeparatorKind::Hard = separator_kind {
+position = position.wrapping_add(1);
 }

 phrase = 'phrase: {
@@ -217,7 +213,7 @@ pub fn make_ngram(
 original: ngram_str_interned,
 ngram_words: Some(words_interned),
 is_prefix,
-max_nbr_typos,
+max_levenshtein_distance: max_nbr_typos,
 zero_typo: term.zero_typo,
 one_typo: Lazy::Uninit,
 two_typo: Lazy::Uninit,
@@ -271,7 +267,7 @@ impl PhraseBuilder {
 QueryTerm {
 original: ctx.word_interner.insert(phrase_desc),
 ngram_words: None,
-max_nbr_typos: 0,
+max_levenshtein_distance: 0,
 is_prefix: false,
 zero_typo: ZeroTypoTerm {
 phrase: Some(phrase),
@@ -288,3 +284,36 @@ impl PhraseBuilder {
 })
 }
 }

+#[cfg(test)]
+mod tests {
+use charabia::TokenizerBuilder;

+use super::*;
+use crate::index::tests::TempIndex;

+fn temp_index_with_documents() -> TempIndex {
+let temp_index = TempIndex::new();
+temp_index
+.add_documents(documents!([
+{ "id": 1, "name": "split this world westfali westfalia the Ŵôřlḑôle" },
+{ "id": 2, "name": "Westfália" },
+{ "id": 3, "name": "Ŵôřlḑôle" },
+]))
+.unwrap();
+temp_index
+}

+#[test]
+fn start_with_hard_separator() -> Result<()> {
+let tokenizer = TokenizerBuilder::new().build();
+let tokens = tokenizer.tokenize(".");
+let index = temp_index_with_documents();
+let rtxn = index.read_txn()?;
+let mut ctx = SearchContext::new(&index, &rtxn);
+// panics with `attempt to add with overflow` before <https://github.com/meilisearch/meilisearch/issues/3785>
+let located_query_terms = located_query_terms_from_tokens(&mut ctx, tokens, None)?;
+assert!(located_query_terms.is_empty());
+Ok(())
+}
+}
@@ -209,7 +209,7 @@ impl<G: RankingRuleGraphTrait> RankingRuleGraph<G> {
 self.traverse_breadth_first_backward(self.query_graph.end_node, |cur_node| {
 if cur_node == self.query_graph.end_node {
 *costs_to_end.get_mut(self.query_graph.end_node) = vec![0];
-return true;
+return;
 }
 let mut self_costs = Vec::<u64>::new();

@@ -226,7 +226,6 @@ impl<G: RankingRuleGraphTrait> RankingRuleGraph<G> {
 self_costs.dedup();

 *costs_to_end.get_mut(cur_node) = self_costs;
-true
 });
 costs_to_end
 }
@@ -236,6 +235,9 @@ impl<G: RankingRuleGraphTrait> RankingRuleGraph<G> {
 node_with_removed_outgoing_conditions: Interned<QueryNode>,
 costs: &mut MappedInterner<QueryNode, Vec<u64>>,
 ) {
+// Traverse the graph backward from the target node, recomputing the cost for each of its predecessors.
+// We first check that no other node is contributing the same total cost to a predecessor before removing
+// the cost from the predecessor.
 self.traverse_breadth_first_backward(node_with_removed_outgoing_conditions, |cur_node| {
 let mut costs_to_remove = FxHashSet::default();
 costs_to_remove.extend(costs.get(cur_node).iter().copied());
@@ -246,19 +248,18 @@ impl<G: RankingRuleGraphTrait> RankingRuleGraph<G> {
 for cost in costs.get(edge.dest_node).iter() {
 costs_to_remove.remove(&(*cost + edge.cost as u64));
 if costs_to_remove.is_empty() {
-return false;
+return;
 }
 }
 }
 if costs_to_remove.is_empty() {
-return false;
+return;
 }
 let mut new_costs = BTreeSet::from_iter(costs.get(cur_node).iter().copied());
 for c in costs_to_remove {
 new_costs.remove(&c);
 }
 *costs.get_mut(cur_node) = new_costs.into_iter().collect();
-true
 });
 }

@@ -269,7 +270,7 @@ impl<G: RankingRuleGraphTrait> RankingRuleGraph<G> {
 pub fn traverse_breadth_first_backward(
 &self,
 from: Interned<QueryNode>,
-mut visit: impl FnMut(Interned<QueryNode>) -> bool,
+mut visit: impl FnMut(Interned<QueryNode>),
 ) {
 let mut reachable = SmallBitmap::for_interned_values_in(&self.query_graph.nodes);
 {
@@ -312,10 +313,9 @@ impl<G: RankingRuleGraphTrait> RankingRuleGraph<G> {
 continue;
 }
 unreachable_or_visited.insert(cur_node);
-if visit(cur_node) {
+visit(cur_node);
 for prev_node in self.query_graph.nodes.get(cur_node).predecessors.iter() {
-if !enqueued.contains(prev_node) && !unreachable_or_visited.contains(prev_node)
-{
+if !enqueued.contains(prev_node) && !unreachable_or_visited.contains(prev_node) {
 stack.push_back(prev_node);
 enqueued.insert(prev_node);
 }
@@ -323,4 +323,3 @@ impl<G: RankingRuleGraphTrait> RankingRuleGraph<G> {
 }
 }
 }
-}
@@ -1,13 +1,12 @@
 #![allow(clippy::too_many_arguments)]

 use super::ProximityCondition;
+use crate::proximity::MAX_DISTANCE;
 use crate::search::new::interner::{DedupInterner, Interned};
 use crate::search::new::query_term::LocatedQueryTermSubset;
 use crate::search::new::SearchContext;
 use crate::Result;

-const MAX_PROX: usize = crate::proximity::MAX_DISTANCE as usize;

 pub fn build_edges(
 _ctx: &mut SearchContext,
 conditions_interner: &mut DedupInterner<ProximityCondition>,
@@ -37,7 +36,7 @@ pub fn build_edges(
 }

 let mut conditions = vec![];
-for cost in right_ngram_length..(MAX_PROX + right_ngram_length) {
+for cost in right_ngram_length..(7 + right_ngram_length) {
 conditions.push((
 cost as u32,
 conditions_interner.insert(ProximityCondition::Uninit {
@@ -49,7 +48,7 @@ pub fn build_edges(
 }

 conditions.push((
-(MAX_PROX + right_ngram_length) as u32,
+MAX_DISTANCE - 1 + right_ngram_length as u32,
 conditions_interner.insert(ProximityCondition::Term { term: right_term.clone() }),
 ));

@@ -50,7 +50,7 @@ impl RankingRuleGraphTrait for TypoGraph {
 // 3-gram -> equivalent to 2 typos
 let base_cost = if term.term_ids.len() == 1 { 0 } else { term.term_ids.len() as u32 };

-for nbr_typos in 0..=term.term_subset.max_nbr_typos(ctx) {
+for nbr_typos in 0..=term.term_subset.max_typo_cost(ctx) {
 let mut term = term.clone();
 match nbr_typos {
 0 => {
@@ -138,7 +138,7 @@ fn test_attribute_position_simple() {
 s.terms_matching_strategy(TermsMatchingStrategy::All);
 s.query("quick brown");
 let SearchResult { documents_ids, .. } = s.execute().unwrap();
-insta::assert_snapshot!(format!("{documents_ids:?}"), @"[10, 11, 12, 13, 3, 4, 2, 1, 0, 6, 8, 7, 9, 5]");
+insta::assert_snapshot!(format!("{documents_ids:?}"), @"[10, 11, 12, 13, 2, 3, 4, 1, 0, 6, 8, 7, 9, 5]");
 }
 #[test]
 fn test_attribute_position_repeated() {
@@ -163,7 +163,7 @@ fn test_attribute_position_different_fields() {
 s.terms_matching_strategy(TermsMatchingStrategy::All);
 s.query("quick brown");
 let SearchResult { documents_ids, .. } = s.execute().unwrap();
-insta::assert_snapshot!(format!("{documents_ids:?}"), @"[10, 11, 12, 13, 3, 4, 2, 1, 0, 6, 8, 7, 9, 5]");
+insta::assert_snapshot!(format!("{documents_ids:?}"), @"[10, 11, 12, 13, 2, 3, 4, 1, 0, 6, 8, 7, 9, 5]");
 }

 #[test]
@@ -176,5 +176,5 @@ fn test_attribute_position_ngrams() {
 s.terms_matching_strategy(TermsMatchingStrategy::All);
 s.query("quick brown");
 let SearchResult { documents_ids, .. } = s.execute().unwrap();
-insta::assert_snapshot!(format!("{documents_ids:?}"), @"[10, 11, 12, 13, 3, 4, 2, 1, 0, 6, 8, 7, 9, 5]");
+insta::assert_snapshot!(format!("{documents_ids:?}"), @"[10, 11, 12, 13, 2, 3, 4, 1, 0, 6, 8, 7, 9, 5]");
 }
@@ -11,10 +11,11 @@ It doesn't test properly:
 - distinct attributes with arrays (because we know it's incorrect as well)
 */

+use std::collections::HashSet;

 use big_s::S;
 use heed::RoTxn;
 use maplit::hashset;
-use std::collections::HashSet;

 use super::collect_field_values;
 use crate::index::tests::TempIndex;
@@ -3,9 +3,9 @@ This module tests the following properties:
|
|||||||
|
|
||||||
1. Two consecutive words from a query can be combined into a "2gram"
|
1. Two consecutive words from a query can be combined into a "2gram"
|
||||||
2. Three consecutive words from a query can be combined into a "3gram"
|
2. Three consecutive words from a query can be combined into a "3gram"
|
||||||
3. A word from the query can be split into two consecutive words (split words)
|
3. A word from the query can be split into two consecutive words (split words), no matter how short it is
|
||||||
4. A 2gram can be split into two words
|
4. A 2gram can be split into two words
|
||||||
5. A 3gram cannot be split into two words
|
5. A 3gram can be split into two words
|
||||||
6. 2grams can contain up to 1 typo
|
6. 2grams can contain up to 1 typo
|
||||||
7. 3grams cannot have typos
|
7. 3grams cannot have typos
|
||||||
8. 2grams and 3grams can be prefix tolerant
|
8. 2grams and 3grams can be prefix tolerant
|
||||||
@@ -14,6 +14,7 @@ This module tests the following properties:
|
|||||||
11. Disabling typo tolerance does not disable ngram tolerance
|
11. Disabling typo tolerance does not disable ngram tolerance
|
||||||
12. Prefix tolerance is disabled for the last word if a space follows it
|
12. Prefix tolerance is disabled for the last word if a space follows it
|
||||||
13. Ngrams cannot be formed by combining a phrase and a word or two phrases
|
13. Ngrams cannot be formed by combining a phrase and a word or two phrases
|
||||||
|
14. Split words are not disabled by the `disableOnAttribute` or `disableOnWords` typo settings
|
||||||
*/
|
*/
|
||||||
|
|
||||||
use crate::index::tests::TempIndex;
|
use crate::index::tests::TempIndex;
|
||||||
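To make the "split words" wording above concrete: a single query term may match two consecutive document words whose concatenation equals the term, e.g. the term `sunflower` matching the text `sun flower`, or `xyz` matching `xy z`. The following toy function is only an illustration of that property under this reading; it is not how milli implements the matching:

```rust
/// Returns true if `term` equals the concatenation of two consecutive
/// words of `text` -- the "split words" case described above.
fn matches_as_split_word(term: &str, text: &str) -> bool {
    let words: Vec<&str> = text.split_whitespace().collect();
    words.windows(2).any(|pair| format!("{}{}", pair[0], pair[1]) == term)
}

fn main() {
    assert!(matches_as_split_word("sunflower", "the sun flower is tall"));
    assert!(matches_as_split_word("xyz", "xy z")); // "no matter how short it is"
    assert!(!matches_as_split_word("sunflower", "the sunflowers are pretty"));
    println!("split-word illustration holds");
}
```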
@@ -56,6 +57,10 @@ fn create_index() -> TempIndex {
 {
 "id": 5,
 "text": "sunflowering is not a verb"
+},
+{
+"id": 6,
+"text": "xy z"
 }
 ]))
 .unwrap();
@@ -263,11 +268,12 @@ fn test_disable_split_words() {
 s.query("sunflower ");
 let SearchResult { documents_ids, .. } = s.execute().unwrap();
 // no document containing `sun flower`
-insta::assert_snapshot!(format!("{documents_ids:?}"), @"[3]");
+insta::assert_snapshot!(format!("{documents_ids:?}"), @"[1, 3]");
 let texts = collect_field_values(&index, &txn, "text", &documents_ids);
 insta::assert_debug_snapshot!(texts, @r###"
 [
 "\"the sun flower is tall\"",
+"\"the sunflower is tall\"",
 ]
 "###);
 }
@@ -307,10 +313,11 @@ fn test_3gram_no_split_words() {
 let SearchResult { documents_ids, .. } = s.execute().unwrap();

 // no document with `sun flower`
-insta::assert_snapshot!(format!("{documents_ids:?}"), @"[2, 3, 5]");
+insta::assert_snapshot!(format!("{documents_ids:?}"), @"[1, 2, 3, 5]");
 let texts = collect_field_values(&index, &txn, "text", &documents_ids);
 insta::assert_debug_snapshot!(texts, @r###"
 [
+"\"the sun flower is tall\"",
 "\"the sunflowers are pretty\"",
 "\"the sunflower is tall\"",
 "\"sunflowering is not a verb\"",
@@ -369,3 +376,50 @@ fn test_no_ngram_phrases() {
 ]
 "###);
 }
+
+#[test]
+fn test_short_split_words() {
+let index = create_index();
+let txn = index.read_txn().unwrap();
+
+let mut s = Search::new(&txn, &index);
+s.terms_matching_strategy(TermsMatchingStrategy::All);
+s.query("xyz");
+let SearchResult { documents_ids, .. } = s.execute().unwrap();
+
+insta::assert_snapshot!(format!("{documents_ids:?}"), @"[6]");
+let texts = collect_field_values(&index, &txn, "text", &documents_ids);
+insta::assert_debug_snapshot!(texts, @r###"
+[
+"\"xy z\"",
+]
+"###);
+}
+
+#[test]
+fn test_split_words_never_disabled() {
+let index = create_index();
+
+index
+.update_settings(|s| {
+s.set_exact_words(["sunflower"].iter().map(ToString::to_string).collect());
+s.set_exact_attributes(["text"].iter().map(ToString::to_string).collect());
+})
+.unwrap();
+
+let txn = index.read_txn().unwrap();
+
+let mut s = Search::new(&txn, &index);
+s.terms_matching_strategy(TermsMatchingStrategy::All);
+s.query("the sunflower is tall");
+let SearchResult { documents_ids, .. } = s.execute().unwrap();
+
+insta::assert_snapshot!(format!("{documents_ids:?}"), @"[1, 3]");
+let texts = collect_field_values(&index, &txn, "text", &documents_ids);
+insta::assert_debug_snapshot!(texts, @r###"
+[
+"\"the sun flower is tall\"",
+"\"the sunflower is tall\"",
+]
+"###);
+}
@@ -9,7 +9,7 @@ This module tests the following properties:
 6. A typo on the first letter of a word counts as two typos
 7. Phrases are not typo tolerant
 8. 2grams can have 1 typo if they are larger than `min_word_len_two_typos`
-9. 3grams are not typo tolerant
+9. 3grams are not typo tolerant (but they can be split into two words)
 10. The `typo` ranking rule assumes the role of the `words` ranking rule implicitly
 if `words` doesn't exist before it.
 11. The `typo` ranking rule places documents with the same number of typos in the same bucket
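For context on properties 6–9: the number of typos a term tolerates depends on its length, governed by thresholds such as the `min_word_len_two_typos` setting named above. A rough sketch of that rule is below; the threshold values 5 and 9 are assumed defaults for the example and are not stated anywhere in this diff:

```rust
/// How many typos a term of `len` characters may tolerate, given the two
/// length thresholds. Terms below the first threshold tolerate none.
fn allowed_typos(len: usize, min_word_len_one_typo: usize, min_word_len_two_typos: usize) -> usize {
    if len >= min_word_len_two_typos {
        2
    } else if len >= min_word_len_one_typo {
        1
    } else {
        0
    }
}

fn main() {
    // Assumed defaults: one typo from 5 characters, two typos from 9.
    assert_eq!(allowed_typos("the".len(), 5, 9), 0);
    assert_eq!(allowed_typos("quick".len(), 5, 9), 1);
    assert_eq!(allowed_typos("interconnection".len(), 5, 9), 2);
}
```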
@@ -287,15 +287,16 @@ fn test_typo_exact_word() {
 ]
 "###);

-// exact words do not disable prefix (sunflowering OK, but no sunflowar or sun flower)
+// exact words do not disable prefix (sunflowering OK, but no sunflowar)
 let mut s = Search::new(&txn, &index);
 s.terms_matching_strategy(TermsMatchingStrategy::All);
 s.query("network interconnection sunflower");
 let SearchResult { documents_ids, .. } = s.execute().unwrap();
-insta::assert_snapshot!(format!("{documents_ids:?}"), @"[16, 18]");
+insta::assert_snapshot!(format!("{documents_ids:?}"), @"[16, 17, 18]");
 let texts = collect_field_values(&index, &txn, "text", &documents_ids);
 insta::assert_debug_snapshot!(texts, @r###"
 [
+"\"network interconnection sunflower\"",
 "\"network interconnection sun flower\"",
 "\"network interconnection sunflowering\"",
 ]
@@ -89,7 +89,6 @@ Create a snapshot test of the given database.
 - `exact_word_docids`
 - `word_prefix_docids`
 - `exact_word_prefix_docids`
-- `docid_word_positions`
 - `word_pair_proximity_docids`
 - `word_prefix_pair_proximity_docids`
 - `word_position_docids`
@@ -217,11 +216,6 @@ pub fn snap_exact_word_prefix_docids(index: &Index) -> String {
 &format!("{s:<16} {}", display_bitmap(&b))
 })
 }
-pub fn snap_docid_word_positions(index: &Index) -> String {
-make_db_snap_from_iter!(index, docid_word_positions, |((idx, s), b)| {
-&format!("{idx:<6} {s:<16} {}", display_bitmap(&b))
-})
-}
 pub fn snap_word_pair_proximity_docids(index: &Index) -> String {
 make_db_snap_from_iter!(index, word_pair_proximity_docids, |((proximity, word1, word2), b)| {
 &format!("{proximity:<2} {word1:<16} {word2:<16} {}", display_bitmap(&b))
@@ -477,9 +471,6 @@ macro_rules! full_snap_of_db {
 ($index:ident, exact_word_prefix_docids) => {{
 $crate::snapshot_tests::snap_exact_word_prefix_docids(&$index)
 }};
-($index:ident, docid_word_positions) => {{
-$crate::snapshot_tests::snap_docid_word_positions(&$index)
-}};
 ($index:ident, word_pair_proximity_docids) => {{
 $crate::snapshot_tests::snap_word_pair_proximity_docids(&$index)
 }};
@@ -23,7 +23,6 @@ impl<'t, 'u, 'i> ClearDocuments<'t, 'u, 'i> {
 exact_word_docids,
 word_prefix_docids,
 exact_word_prefix_docids,
-docid_word_positions,
 word_pair_proximity_docids,
 word_prefix_pair_proximity_docids,
 prefix_word_pair_proximity_docids,
@@ -80,7 +79,6 @@ impl<'t, 'u, 'i> ClearDocuments<'t, 'u, 'i> {
 exact_word_docids.clear(self.wtxn)?;
 word_prefix_docids.clear(self.wtxn)?;
 exact_word_prefix_docids.clear(self.wtxn)?;
-docid_word_positions.clear(self.wtxn)?;
 word_pair_proximity_docids.clear(self.wtxn)?;
 word_prefix_pair_proximity_docids.clear(self.wtxn)?;
 prefix_word_pair_proximity_docids.clear(self.wtxn)?;
@@ -141,7 +139,6 @@ mod tests {

 assert!(index.word_docids.is_empty(&rtxn).unwrap());
 assert!(index.word_prefix_docids.is_empty(&rtxn).unwrap());
-assert!(index.docid_word_positions.is_empty(&rtxn).unwrap());
 assert!(index.word_pair_proximity_docids.is_empty(&rtxn).unwrap());
 assert!(index.field_id_word_count_docids.is_empty(&rtxn).unwrap());
 assert!(index.word_prefix_pair_proximity_docids.is_empty(&rtxn).unwrap());
@@ -1,5 +1,5 @@
 use std::collections::btree_map::Entry;
-use std::collections::{HashMap, HashSet};
+use std::collections::{BTreeSet, HashMap, HashSet};

 use fst::IntoStreamer;
 use heed::types::{ByteSlice, DecodeIgnore, Str, UnalignedSlice};
@@ -15,8 +15,7 @@ use crate::facet::FacetType;
 use crate::heed_codec::facet::FieldDocIdFacetCodec;
 use crate::heed_codec::CboRoaringBitmapCodec;
 use crate::{
-ExternalDocumentsIds, FieldId, FieldIdMapMissingEntry, Index, Result, RoaringBitmapCodec,
+ExternalDocumentsIds, FieldId, FieldIdMapMissingEntry, Index, Result, RoaringBitmapCodec, BEU32,
-SmallString32, BEU32,
 };

 pub struct DeleteDocuments<'t, 'u, 'i> {
@@ -232,7 +231,6 @@ impl<'t, 'u, 'i> DeleteDocuments<'t, 'u, 'i> {
 exact_word_docids,
 word_prefix_docids,
 exact_word_prefix_docids,
-docid_word_positions,
 word_pair_proximity_docids,
 field_id_word_count_docids,
 word_prefix_pair_proximity_docids,
@@ -251,23 +249,9 @@ impl<'t, 'u, 'i> DeleteDocuments<'t, 'u, 'i> {
 facet_id_is_empty_docids,
 documents,
 } = self.index;
+// Remove from the documents database
-// Retrieve the words contained in the documents.
-let mut words = Vec::new();
 for docid in &self.to_delete_docids {
 documents.delete(self.wtxn, &BEU32::new(docid))?;

-// We iterate through the words positions of the document id, retrieve the word and delete the positions.
-// We create an iterator to be able to get the content and delete the key-value itself.
-// It's faster to acquire a cursor to get and delete, as we avoid traversing the LMDB B-Tree two times but only once.
-let mut iter = docid_word_positions.prefix_iter_mut(self.wtxn, &(docid, ""))?;
-while let Some(result) = iter.next() {
-let ((_docid, word), _positions) = result?;
-// This boolean will indicate if we must remove this word from the words FST.
-words.push((SmallString32::from(word), false));
-// safety: we don't keep references from inside the LMDB database.
-unsafe { iter.del_current()? };
-}
 }
 // We acquire the current external documents ids map...
 // Note that its soft-deleted document ids field will be equal to the `to_delete_docids`
@@ -278,42 +262,27 @@ impl<'t, 'u, 'i> DeleteDocuments<'t, 'u, 'i> {
 let new_external_documents_ids = new_external_documents_ids.into_static();
 self.index.put_external_documents_ids(self.wtxn, &new_external_documents_ids)?;

-// Maybe we can improve the get performance of the words
+let mut words_to_keep = BTreeSet::default();
-// if we sort the words first, keeping the LMDB pages in cache.
+let mut words_to_delete = BTreeSet::default();
-words.sort_unstable();

 // We iterate over the words and delete the documents ids
 // from the word docids database.
-for (word, must_remove) in &mut words {
 remove_from_word_docids(
 self.wtxn,
 word_docids,
-word.as_str(),
-must_remove,
 &self.to_delete_docids,
+&mut words_to_keep,
+&mut words_to_delete,
 )?;

 remove_from_word_docids(
 self.wtxn,
 exact_word_docids,
-word.as_str(),
-must_remove,
 &self.to_delete_docids,
+&mut words_to_keep,
+&mut words_to_delete,
 )?;
-}

 // We construct an FST set that contains the words to delete from the words FST.
-let words_to_delete =
+let words_to_delete = fst::Set::from_iter(words_to_delete.difference(&words_to_keep))?;
-words.iter().filter_map(
-|(word, must_remove)| {
-if *must_remove {
-Some(word.as_str())
-} else {
-None
-}
-},
-);
-let words_to_delete = fst::Set::from_iter(words_to_delete)?;

 let new_words_fst = {
 // We retrieve the current words FST from the database.
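The one-liner that replaces the old `filter_map` builds the deletion FST from the set difference `words_to_delete - words_to_keep`. This works because `BTreeSet::difference` yields its items in sorted order, which is what `fst::Set::from_iter` requires. A standalone sketch of the same idea (the sample words are made up):

```rust
use std::collections::BTreeSet;

fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Words whose posting lists became empty, and words still used by a
    // remaining document (same roles as in the hunk above).
    let words_to_delete: BTreeSet<String> =
        ["kitten", "sunflower"].map(String::from).into_iter().collect();
    let words_to_keep: BTreeSet<String> = ["sunflower"].map(String::from).into_iter().collect();

    // `difference` iterates in lexicographic order, so the keys arrive in
    // the order `fst::Set::from_iter` expects.
    let to_delete = fst::Set::from_iter(words_to_delete.difference(&words_to_keep))?;

    assert!(to_delete.contains("kitten"));
    assert!(!to_delete.contains("sunflower"));
    Ok(())
}
```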
@@ -532,23 +501,24 @@ fn remove_from_word_prefix_docids(
 fn remove_from_word_docids(
 txn: &mut heed::RwTxn,
 db: &heed::Database<Str, RoaringBitmapCodec>,
-word: &str,
-must_remove: &mut bool,
 to_remove: &RoaringBitmap,
+words_to_keep: &mut BTreeSet<String>,
+words_to_remove: &mut BTreeSet<String>,
 ) -> Result<()> {
 // We create an iterator to be able to get the content and delete the word docids.
 // It's faster to acquire a cursor to get and delete or put, as we avoid traversing
 // the LMDB B-Tree two times but only once.
-let mut iter = db.prefix_iter_mut(txn, word)?;
+let mut iter = db.iter_mut(txn)?;
-if let Some((key, mut docids)) = iter.next().transpose()? {
+while let Some((key, mut docids)) = iter.next().transpose()? {
-if key == word {
 let previous_len = docids.len();
 docids -= to_remove;
 if docids.is_empty() {
 // safety: we don't keep references from inside the LMDB database.
 unsafe { iter.del_current()? };
-*must_remove = true;
+words_to_remove.insert(key.to_owned());
-} else if docids.len() != previous_len {
+} else {
+words_to_keep.insert(key.to_owned());
+if docids.len() != previous_len {
 let key = key.to_owned();
 // safety: we don't keep references from inside the LMDB database.
 unsafe { iter.put_current(&key, &docids)? };
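The rewritten loop relies on in-place subtraction of roaring bitmaps: the ids being deleted are removed from each word's posting list, and the word is routed to `words_to_remove` or `words_to_keep` depending on whether anything is left. A small self-contained sketch of that bitmap arithmetic (the document ids are invented):

```rust
use roaring::RoaringBitmap;

fn main() {
    // Posting list of one word, and the set of document ids being deleted.
    let mut docids: RoaringBitmap = [1u32, 2, 3].into_iter().collect();
    let to_remove: RoaringBitmap = [2u32, 3].into_iter().collect();

    let previous_len = docids.len();
    docids -= &to_remove; // same in-place subtraction as `docids -= to_remove` above

    if docids.is_empty() {
        println!("word would be queued for removal from the words FST");
    } else if docids.len() != previous_len {
        println!("shrunk posting list would be written back under the same key");
    }
    assert_eq!(docids.into_iter().collect::<Vec<u32>>(), vec![1]);
}
```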
@@ -627,7 +597,7 @@ mod tests {

 use super::*;
 use crate::index::tests::TempIndex;
-use crate::{db_snap, Filter};
+use crate::{db_snap, Filter, Search};

 fn delete_documents<'t>(
 wtxn: &mut RwTxn<'t, '_>,
@@ -1199,4 +1169,52 @@ mod tests {
 DeletionStrategy::AlwaysSoft,
 );
 }
+
+#[test]
+fn delete_words_exact_attributes() {
+let index = TempIndex::new();
+
+index
+.update_settings(|settings| {
+settings.set_primary_key(S("id"));
+settings.set_searchable_fields(vec![S("text"), S("exact")]);
+settings.set_exact_attributes(vec![S("exact")].into_iter().collect());
+})
+.unwrap();
+
+index
+.add_documents(documents!([
+{ "id": 0, "text": "hello" },
+{ "id": 1, "exact": "hello"}
+]))
+.unwrap();
+db_snap!(index, word_docids, 1, @r###"
+hello [0, ]
+"###);
+db_snap!(index, exact_word_docids, 1, @r###"
+hello [1, ]
+"###);
+db_snap!(index, words_fst, 1, @"300000000000000001084cfcfc2ce1000000016000000090ea47f");
+
+let mut wtxn = index.write_txn().unwrap();
+let deleted_internal_ids =
+delete_documents(&mut wtxn, &index, &["1"], DeletionStrategy::AlwaysHard);
+wtxn.commit().unwrap();
+
+db_snap!(index, word_docids, 2, @r###"
+hello [0, ]
+"###);
+db_snap!(index, exact_word_docids, 2, @"");
+db_snap!(index, words_fst, 2, @"300000000000000001084cfcfc2ce1000000016000000090ea47f");
+
+insta::assert_snapshot!(format!("{deleted_internal_ids:?}"), @"[1]");
+let txn = index.read_txn().unwrap();
+let words = index.words_fst(&txn).unwrap().into_stream().into_strs().unwrap();
+insta::assert_snapshot!(format!("{words:?}"), @r###"["hello"]"###);
+
+let mut s = Search::new(&txn, &index);
+s.query("hello");
+let crate::SearchResult { documents_ids, .. } = s.execute().unwrap();
+insta::assert_snapshot!(format!("{documents_ids:?}"), @"[0]");
+}
 }
@@ -261,7 +261,9 @@ pub(crate) mod test_helpers {
 let options = options.map_size(4096 * 4 * 1000 * 100);
 let tempdir = tempfile::TempDir::new().unwrap();
 let env = options.open(tempdir.path()).unwrap();
-let content = env.create_database(None).unwrap();
+let mut wtxn = env.write_txn().unwrap();
+let content = env.create_database(&mut wtxn, None).unwrap();
+wtxn.commit().unwrap();

 FacetIndex {
 content,
@@ -91,7 +91,7 @@ fn document_word_positions_into_sorter(
 while !word_positions_heap.is_empty() {
 while let Some(peeked_word_position) = word_positions_heap.pop() {
 ordered_peeked_word_positions.push(peeked_word_position);
-if ordered_peeked_word_positions.len() == (MAX_DISTANCE - 1) as usize {
+if ordered_peeked_word_positions.len() == MAX_DISTANCE as usize - 1 {
 break;
 }
 }
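The `(MAX_DISTANCE - 1) as usize` to `MAX_DISTANCE as usize - 1` change only moves the subtraction to the other side of the cast; for an unsigned integer constant the two expressions are equal, so this reads as a refactor rather than a behaviour change. A quick check, with an assumed `const MAX_DISTANCE: u32 = 8;` standing in for the real constant:

```rust
// Assumed value; the real MAX_DISTANCE lives elsewhere in the crate.
const MAX_DISTANCE: u32 = 8;

fn main() {
    let before = (MAX_DISTANCE - 1) as usize;
    let after = MAX_DISTANCE as usize - 1;
    assert_eq!(before, after);
    println!("{before} == {after}");
}
```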
@@ -325,8 +325,6 @@ fn send_and_extract_flattened_documents_data(
 // send docid_word_positions_chunk to DB writer
 let docid_word_positions_chunk =
 unsafe { as_cloneable_grenad(&docid_word_positions_chunk)? };
-let _ = lmdb_writer_sx
-.send(Ok(TypedChunk::DocidWordPositions(docid_word_positions_chunk.clone())));

 let _ =
 lmdb_writer_sx.send(Ok(TypedChunk::ScriptLanguageDocids(script_language_pair)));
@@ -4,7 +4,6 @@ use std::result::Result as StdResult;

 use roaring::RoaringBitmap;

-use super::read_u32_ne_bytes;
 use crate::heed_codec::CboRoaringBitmapCodec;
 use crate::update::index_documents::transform::Operation;
 use crate::Result;
@@ -22,10 +21,6 @@ pub fn concat_u32s_array<'a>(_key: &[u8], values: &[Cow<'a, [u8]>]) -> Result<Co
 }
 }

-pub fn roaring_bitmap_from_u32s_array(slice: &[u8]) -> RoaringBitmap {
-read_u32_ne_bytes(slice).collect()
-}

 pub fn serialize_roaring_bitmap(bitmap: &RoaringBitmap, buffer: &mut Vec<u8>) -> io::Result<()> {
 buffer.clear();
 buffer.reserve(bitmap.serialized_size());
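`serialize_roaring_bitmap` above clears a reusable buffer, reserves the exact serialized size, and writes the bitmap into it. A hedged sketch of the same pattern using the roaring crate's public API (`serialize_into_buffer` is a stand-alone rewrite for illustration, not the crate's own helper):

```rust
use roaring::RoaringBitmap;

fn serialize_into_buffer(bitmap: &RoaringBitmap, buffer: &mut Vec<u8>) -> std::io::Result<()> {
    buffer.clear();
    buffer.reserve(bitmap.serialized_size());
    bitmap.serialize_into(buffer)
}

fn main() -> std::io::Result<()> {
    let bitmap: RoaringBitmap = (0u32..1_000).collect();
    let mut buffer = Vec::new();
    serialize_into_buffer(&bitmap, &mut buffer)?;

    // Round-trip to check the buffer holds a valid serialization.
    let decoded = RoaringBitmap::deserialize_from(&buffer[..])?;
    assert_eq!(bitmap, decoded);
    Ok(())
}
```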
@@ -14,8 +14,8 @@ pub use grenad_helpers::{
 };
 pub use merge_functions::{
 concat_u32s_array, keep_first, keep_latest_obkv, merge_cbo_roaring_bitmaps,
-merge_obkvs_and_operations, merge_roaring_bitmaps, merge_two_obkvs,
+merge_obkvs_and_operations, merge_roaring_bitmaps, merge_two_obkvs, serialize_roaring_bitmap,
-roaring_bitmap_from_u32s_array, serialize_roaring_bitmap, MergeFn,
+MergeFn,
 };

 use crate::MAX_WORD_LENGTH;
@@ -2513,6 +2513,5 @@ mod tests {

 db_snap!(index, word_fid_docids, 3, @"4c2e2a1832e5802796edc1638136d933");
 db_snap!(index, word_position_docids, 3, @"74f556b91d161d997a89468b4da1cb8f");
-db_snap!(index, docid_word_positions, 3, @"5287245332627675740b28bd46e1cde1");
 }
 }
@@ -7,24 +7,19 @@ use std::io;
 use charabia::{Language, Script};
 use grenad::MergerBuilder;
 use heed::types::ByteSlice;
-use heed::{BytesDecode, RwTxn};
+use heed::RwTxn;
 use roaring::RoaringBitmap;

 use super::helpers::{
-self, merge_ignore_values, roaring_bitmap_from_u32s_array, serialize_roaring_bitmap,
+self, merge_ignore_values, serialize_roaring_bitmap, valid_lmdb_key, CursorClonableMmap,
-valid_lmdb_key, CursorClonableMmap,
 };
 use super::{ClonableMmap, MergeFn};
 use crate::facet::FacetType;
 use crate::update::facet::FacetsUpdate;
 use crate::update::index_documents::helpers::as_cloneable_grenad;
-use crate::{
+use crate::{lat_lng_to_xyz, CboRoaringBitmapCodec, DocumentId, GeoPoint, Index, Result};
-lat_lng_to_xyz, BoRoaringBitmapCodec, CboRoaringBitmapCodec, DocumentId, GeoPoint, Index,
-Result,
-};

 pub(crate) enum TypedChunk {
-DocidWordPositions(grenad::Reader<CursorClonableMmap>),
 FieldIdDocidFacetStrings(grenad::Reader<CursorClonableMmap>),
 FieldIdDocidFacetNumbers(grenad::Reader<CursorClonableMmap>),
 Documents(grenad::Reader<CursorClonableMmap>),
@@ -56,29 +51,6 @@ pub(crate) fn write_typed_chunk_into_index(
 ) -> Result<(RoaringBitmap, bool)> {
 let mut is_merged_database = false;
 match typed_chunk {
-TypedChunk::DocidWordPositions(docid_word_positions_iter) => {
-write_entries_into_database(
-docid_word_positions_iter,
-&index.docid_word_positions,
-wtxn,
-index_is_empty,
-|value, buffer| {
-// ensure that values are unique and ordered
-let positions = roaring_bitmap_from_u32s_array(value);
-BoRoaringBitmapCodec::serialize_into(&positions, buffer);
-Ok(buffer)
-},
-|new_values, db_values, buffer| {
-let new_values = roaring_bitmap_from_u32s_array(new_values);
-let positions = match BoRoaringBitmapCodec::bytes_decode(db_values) {
-Some(db_values) => new_values | db_values,
-None => new_values, // should not happen
-};
-BoRoaringBitmapCodec::serialize_into(&positions, buffer);
-Ok(())
-},
-)?;
-}
 TypedChunk::Documents(obkv_documents_iter) => {
 let mut cursor = obkv_documents_iter.into_cursor()?;
 while let Some((key, value)) = cursor.move_on_next()? {