Compare commits

...

27 Commits

Author SHA1 Message Date
Louis Dureuil
4492bbb5f9 Greatly simplify implementation by eschewing the pipe 2025-10-06 21:36:54 +02:00
Louis Dureuil
a2fc7ae5e8 Use a buffered reader 2025-10-06 14:36:48 +02:00
Louis Dureuil
b817f58991 Only persist the archive in finish 2025-10-06 14:36:28 +02:00
Louis Dureuil
c885171029 process: add cancelling points in process snapshot 2025-10-02 17:16:05 +02:00
Louis Dureuil
3870a374af Compression: implement cancellation and change env copy method 2025-10-02 16:56:25 +02:00
Louis Dureuil
d41716d8f0 Add MustStopProcessing::as_lambda 2025-10-02 16:50:44 +02:00
Louis Dureuil
43a6505435 Use PipedArchiveBuilder to process snapshots without compaction 2025-10-02 11:18:54 +02:00
Louis Dureuil
467e15d9c0 WIP: Add PipedArchiveBuilder 2025-10-02 11:18:13 +02:00
Louis Dureuil
91275adb76 Add necessary accessors 2025-10-02 11:12:51 +02:00
Many the fish
c29bdcae23 Merge pull request #5913 from meilisearch/dependabot/github_actions/actions/setup-python-6
Bump actions/setup-python from 5 to 6
2025-09-29 14:58:45 +00:00
Many the fish
75219181a3 Merge pull request #5834 from meilisearch/fix-openapi-ci
Minor improvement in OpenAPI CI
2025-09-29 13:55:12 +00:00
Many the fish
a5b5cf7cd1 Merge pull request #5916 from meilisearch/dependabot/github_actions/sigstore/cosign-installer-3.10.0
Bump sigstore/cosign-installer from 3.9.2 to 3.10.0
2025-09-29 13:52:31 +00:00
Many the fish
142ba8ea00 Merge pull request #5915 from meilisearch/dependabot/github_actions/actions/setup-node-5
Bump actions/setup-node from 4 to 5
2025-09-29 13:52:28 +00:00
Many the fish
4bc823e07c Merge pull request #5914 from meilisearch/dependabot/github_actions/actions/setup-dotnet-5
Bump actions/setup-dotnet from 4 to 5
2025-09-29 13:52:10 +00:00
Many the fish
db06ca7138 Merge pull request #5912 from meilisearch/dependabot/github_actions/actions/setup-go-6
Bump actions/setup-go from 5 to 6
2025-09-29 13:52:06 +00:00
Clément Renault
95595a768e Merge pull request #5911 from EazyAl/main
Update README.md to fix newsletter link
2025-09-29 13:10:16 +00:00
dependabot[bot]
36f649768e Bump sigstore/cosign-installer from 3.9.2 to 3.10.0
Bumps [sigstore/cosign-installer](https://github.com/sigstore/cosign-installer) from 3.9.2 to 3.10.0.
- [Release notes](https://github.com/sigstore/cosign-installer/releases)
- [Commits](d58896d6a1...d7543c93d8)

---
updated-dependencies:
- dependency-name: sigstore/cosign-installer
  dependency-version: 3.10.0
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-09-25 18:01:14 +00:00
dependabot[bot]
0c6fc243f2 Bump actions/setup-node from 4 to 5
Bumps [actions/setup-node](https://github.com/actions/setup-node) from 4 to 5.
- [Release notes](https://github.com/actions/setup-node/releases)
- [Commits](https://github.com/actions/setup-node/compare/v4...v5)

---
updated-dependencies:
- dependency-name: actions/setup-node
  dependency-version: '5'
  dependency-type: direct:production
  update-type: version-update:semver-major
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-09-25 18:01:11 +00:00
dependabot[bot]
dfc46d5627 Bump actions/setup-dotnet from 4 to 5
Bumps [actions/setup-dotnet](https://github.com/actions/setup-dotnet) from 4 to 5.
- [Release notes](https://github.com/actions/setup-dotnet/releases)
- [Commits](https://github.com/actions/setup-dotnet/compare/v4...v5)

---
updated-dependencies:
- dependency-name: actions/setup-dotnet
  dependency-version: '5'
  dependency-type: direct:production
  update-type: version-update:semver-major
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-09-25 18:01:08 +00:00
dependabot[bot]
11d55f2121 Bump actions/setup-python from 5 to 6
Bumps [actions/setup-python](https://github.com/actions/setup-python) from 5 to 6.
- [Release notes](https://github.com/actions/setup-python/releases)
- [Commits](https://github.com/actions/setup-python/compare/v5...v6)

---
updated-dependencies:
- dependency-name: actions/setup-python
  dependency-version: '6'
  dependency-type: direct:production
  update-type: version-update:semver-major
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-09-25 18:01:03 +00:00
dependabot[bot]
014da57cf6 Bump actions/setup-go from 5 to 6
Bumps [actions/setup-go](https://github.com/actions/setup-go) from 5 to 6.
- [Release notes](https://github.com/actions/setup-go/releases)
- [Commits](https://github.com/actions/setup-go/compare/v5...v6)

---
updated-dependencies:
- dependency-name: actions/setup-go
  dependency-version: '6'
  dependency-type: direct:production
  update-type: version-update:semver-major
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-09-25 18:01:00 +00:00
Clément Renault
70a0ff4a8f Merge pull request #5900 from meilisearch/show-dependencies
Show Dependabot dependency upgrade in the changelog
2025-09-25 16:04:03 +00:00
Clément Renault
dd0d5e4b90 Merge pull request #5910 from meilisearch/curquiza-patch-1
Change Java version in SDK CI
2025-09-25 14:32:16 +00:00
Ali Imran
15b3bb1700 Update README.md to fix newsletter link 2025-09-25 16:07:08 +02:00
Clémentine
f25db0795e Change Java version in SDK CI
Updated Java version and distribution in workflow.
2025-09-25 15:03:50 +02:00
curquiza
6f0d26c22c Show dependency upgrade in the changelog for full transparency 2025-09-22 18:30:34 +02:00
curquiza
d52c7dcc94 Add needs: check-version 2025-08-12 20:47:43 +02:00
12 changed files with 304 additions and 32 deletions

View File

@@ -7,6 +7,5 @@ updates:
     schedule:
       interval: "monthly"
     labels:
-      - 'skip changelog'
       - 'dependencies'
     rebase-strategy: disabled

View File

@@ -18,6 +18,7 @@ categories:
     label: 'security'
   - title: '⚙️ Maintenance/misc'
     label:
+      - 'dependencies'
       - 'maintenance'
       - 'documentation'
 template: |
@@ -26,8 +27,3 @@ template: |
   ❤️ Huge thanks to our contributors: $CONTRIBUTORS.
 no-changes-template: 'Changes are coming soon 😎'
 sort-direction: 'ascending'
-replacers:
-  - search: '/(?:and )?@dependabot-preview(?:\[bot\])?,?/g'
-    replace: ''
-  - search: '/(?:and )?@dependabot(?:\[bot\])?,?/g'
-    replace: ''

View File

@@ -65,7 +65,7 @@ jobs:
         uses: docker/setup-buildx-action@v3
       - name: Install cosign
-        uses: sigstore/cosign-installer@d58896d6a1865668819e1d91763c7751a165e159 # tag=v3.9.2
+        uses: sigstore/cosign-installer@d7543c93d881b35a8faa02e8e3605f69b7a1ce62 # tag=v3.10.0
       - name: Login to Docker Hub
         uses: docker/login-action@v3

View File

@@ -11,7 +11,7 @@ jobs:
   check-version:
     name: Check the version validity
     runs-on: ubuntu-latest
-    # No need to check the version for dry run (cron)
+    # No need to check the version for dry run (cron or workflow_dispatch)
     steps:
       - uses: actions/checkout@v5
       # Check if the tag has the v<number>.<number>.<number> format.
@@ -48,7 +48,7 @@ jobs:
       - uses: dtolnay/rust-toolchain@1.89
       - name: Build
         run: cargo build --release --locked
-      # No need to upload binaries for dry run (cron)
+      # No need to upload binaries for dry run (cron or workflow_dispatch)
      - name: Upload binaries to release
        if: github.event_name == 'release'
        uses: svenstaro/upload-release-action@2.11.2
@@ -78,7 +78,7 @@ jobs:
      - uses: dtolnay/rust-toolchain@1.89
      - name: Build
        run: cargo build --release --locked
-      # No need to upload binaries for dry run (cron)
+      # No need to upload binaries for dry run (cron or workflow_dispatch)
      - name: Upload binaries to release
        if: github.event_name == 'release'
        uses: svenstaro/upload-release-action@2.11.2
@@ -111,7 +111,7 @@ jobs:
           command: build
           args: --release --target ${{ matrix.target }}
       - name: Upload the binary to release
-        # No need to upload binaries for dry run (cron)
+        # No need to upload binaries for dry run (cron or workflow_dispatch)
         if: github.event_name == 'release'
         uses: svenstaro/upload-release-action@2.11.2
         with:
@@ -176,7 +176,7 @@ jobs:
       - name: List target output files
         run: ls -lR ./target
       - name: Upload the binary to release
-        # No need to upload binaries for dry run (cron)
+        # No need to upload binaries for dry run (cron or workflow_dispatch)
         if: github.event_name == 'release'
         uses: svenstaro/upload-release-action@2.11.2
         with:
@@ -187,6 +187,7 @@ jobs:
   publish-openapi-file:
     name: Publish OpenAPI file
+    needs: check-version
     runs-on: ubuntu-latest
     steps:
       - name: Checkout code
@@ -201,7 +202,7 @@ jobs:
           cd crates/openapi-generator
           cargo run --release -- --pretty --output ../../meilisearch.json
       - name: Upload OpenAPI to Release
-        # No need to upload for dry run (cron)
+        # No need to upload for dry run (cron or workflow_dispatch)
         if: github.event_name == 'release'
         uses: svenstaro/upload-release-action@2.11.2
         with:

View File

@@ -50,7 +50,7 @@ jobs:
         with:
           repository: meilisearch/meilisearch-dotnet
       - name: Setup .NET Core
-        uses: actions/setup-dotnet@v4
+        uses: actions/setup-dotnet@v5
         with:
           dotnet-version: "8.0.x"
       - name: Install dependencies
@@ -100,7 +100,7 @@ jobs:
           - '7700:7700'
     steps:
       - name: Set up Go
-        uses: actions/setup-go@v5
+        uses: actions/setup-go@v6
         with:
           go-version: stable
       - uses: actions/checkout@v5
@@ -135,13 +135,13 @@ jobs:
       - name: Set up Java
         uses: actions/setup-java@v5
         with:
-          java-version: 8
-          distribution: 'zulu'
+          java-version: 17
+          distribution: 'temurin'
           cache: gradle
       - name: Grant execute permission for gradlew
         run: chmod +x gradlew
       - name: Build and run unit and integration tests
-        run: ./gradlew build integrationTest
+        run: ./gradlew build integrationTest --info
 
   meilisearch-js-tests:
     needs: define-docker-image
@@ -160,7 +160,7 @@ jobs:
         with:
           repository: meilisearch/meilisearch-js
       - name: Setup node
-        uses: actions/setup-node@v4
+        uses: actions/setup-node@v5
         with:
           cache: 'yarn'
       - name: Install dependencies
@@ -224,7 +224,7 @@ jobs:
         with:
           repository: meilisearch/meilisearch-python
       - name: Set up Python
-        uses: actions/setup-python@v5
+        uses: actions/setup-python@v6
       - name: Install pipenv
         uses: dschep/install-pipenv-action@v1
       - name: Install dependencies
@@ -318,7 +318,7 @@ jobs:
         with:
           repository: meilisearch/meilisearch-js-plugins
       - name: Setup node
-        uses: actions/setup-node@v4
+        uses: actions/setup-node@v5
         with:
           cache: yarn
       - name: Install dependencies

View File

@@ -121,7 +121,7 @@ If you want to know more about the kind of data we collect and what we use it fo
 Meilisearch is a search engine created by [Meili](https://www.meilisearch.com/careers), a software development company headquartered in France and with team members all over the world. Want to know more about us? [Check out our blog!](https://blog.meilisearch.com/?utm_campaign=oss&utm_source=github&utm_medium=meilisearch&utm_content=contact)
 
-🗞 [Subscribe to our newsletter](https://meilisearch.us2.list-manage.com/subscribe?u=27870f7b71c908a8b359599fb&id=79582d828e) if you don't want to miss any updates! We promise we won't clutter your mailbox: we only send one edition every two months.
+🗞 [Subscribe to our newsletter](https://share-eu1.hsforms.com/1LN5N0x_GQgq7ss7tXmSykwfg3aq) if you don't want to miss any updates! We promise we won't clutter your mailbox: we only send one edition every two months.
 
 💌 Want to make a suggestion or give feedback? Here are some of the channels where you can reach us:

View File

@@ -33,6 +33,10 @@ impl FileStore {
         std::fs::create_dir_all(&path)?;
         Ok(FileStore { path })
     }
+
+    pub fn path(&self) -> &Path {
+        &self.path
+    }
 }
 
 impl FileStore {

View File

@@ -1,4 +1,4 @@
-use std::path::PathBuf;
+use std::path::{Path, PathBuf};
 use std::sync::{Arc, RwLock};
 use std::time::Duration;
 use std::{fs, thread};
@@ -591,4 +591,8 @@ impl IndexMapper {
     pub fn set_currently_updating_index(&self, index: Option<(String, Index)>) {
         *self.currently_updating_index.write().unwrap() = index;
     }
+
+    pub fn base_path(&self) -> &Path {
+        &self.base_path
+    }
 }

View File

@@ -50,6 +50,11 @@ impl MustStopProcessing {
     pub fn reset(&self) {
         self.0.store(false, Ordering::Relaxed);
     }
+
+    pub fn as_lambda(&self) -> impl Fn() -> bool + Send + Sync + 'static {
+        let clone = self.clone();
+        move || clone.get()
+    }
 }
 
 pub struct Scheduler {

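A minimal, self-contained sketch of how the new `as_lambda` helper can be used (the `MustStopProcessing` shape is reconstructed from the diff above; the worker function is hypothetical): it erases the scheduler-specific flag into a plain `Fn() -> bool` closure that generic code, such as the compression helpers touched in this compare, can poll as a cancellation point without depending on scheduler internals.

```rust
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Arc;

// Reconstructed from the diff above: a cloneable, shared boolean flag.
#[derive(Clone, Default)]
struct MustStopProcessing(Arc<AtomicBool>);

impl MustStopProcessing {
    fn get(&self) -> bool {
        self.0.load(Ordering::Relaxed)
    }

    fn must_stop(&self) {
        self.0.store(true, Ordering::Relaxed);
    }

    // Mirrors the new helper: clone the flag into a `'static` closure.
    fn as_lambda(&self) -> impl Fn() -> bool + Send + Sync + 'static {
        let clone = self.clone();
        move || clone.get()
    }
}

// Hypothetical worker that only needs a "should I stop?" callback.
fn run_until_cancelled(must_stop: impl Fn() -> bool) -> Result<(), &'static str> {
    for _step in 0..1_000 {
        if must_stop() {
            return Err("aborted"); // cancellation point
        }
        // ... do one unit of work ...
    }
    Ok(())
}

fn main() {
    let flag = MustStopProcessing::default();
    let callback = flag.as_lambda();
    flag.must_stop();
    assert!(run_until_cancelled(callback).is_err());
}
```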
View File

@@ -4,6 +4,7 @@ use std::sync::atomic::Ordering;
 
 use meilisearch_types::heed::CompactionOption;
 use meilisearch_types::milli::progress::{Progress, VariableNameStep};
+use meilisearch_types::milli::InternalError;
 use meilisearch_types::tasks::{Status, Task};
 use meilisearch_types::{compression, VERSION_FILE_NAME};
@@ -76,6 +77,22 @@ unsafe fn remove_tasks(
 impl IndexScheduler {
     pub(super) fn process_snapshot(
+        &self,
+        progress: Progress,
+        tasks: Vec<Task>,
+    ) -> Result<Vec<Task>> {
+        let compaction_option = if self.scheduler.experimental_no_snapshot_compaction {
+            CompactionOption::Disabled
+        } else {
+            CompactionOption::Enabled
+        };
+        match compaction_option {
+            CompactionOption::Enabled => self.process_snapshot_with_temp(progress, tasks),
+            CompactionOption::Disabled => self.process_snapshot_with_pipe(progress, tasks),
+        }
+    }
+
+    fn process_snapshot_with_temp(
         &self,
         progress: Progress,
         mut tasks: Vec<Task>,
@@ -105,12 +122,8 @@ impl IndexScheduler {
         progress.update_progress(SnapshotCreationProgress::SnapshotTheIndexScheduler);
         let dst = temp_snapshot_dir.path().join("tasks");
         fs::create_dir_all(&dst)?;
-        let compaction_option = if self.scheduler.experimental_no_snapshot_compaction {
-            CompactionOption::Disabled
-        } else {
-            CompactionOption::Enabled
-        };
-        self.env.copy_to_path(dst.join("data.mdb"), compaction_option)?;
+        self.env.copy_to_path(dst.join("data.mdb"), CompactionOption::Enabled)?;
 
         // 2.2 Remove the current snapshot tasks
         //
@@ -161,7 +174,7 @@ impl IndexScheduler {
             let dst = temp_snapshot_dir.path().join("indexes").join(uuid.to_string());
             fs::create_dir_all(&dst)?;
             index
-                .copy_to_path(dst.join("data.mdb"), compaction_option)
+                .copy_to_path(dst.join("data.mdb"), CompactionOption::Enabled)
                 .map_err(|e| Error::from_milli(e, Some(name.to_string())))?;
         }
@@ -171,7 +184,7 @@ impl IndexScheduler {
         progress.update_progress(SnapshotCreationProgress::SnapshotTheApiKeys);
         let dst = temp_snapshot_dir.path().join("auth");
         fs::create_dir_all(&dst)?;
-        self.scheduler.auth_env.copy_to_path(dst.join("data.mdb"), compaction_option)?;
+        self.scheduler.auth_env.copy_to_path(dst.join("data.mdb"), CompactionOption::Enabled)?;
 
         // 5. Copy and tarball the flat snapshot
         progress.update_progress(SnapshotCreationProgress::CreateTheTarball);
@@ -206,4 +219,138 @@ impl IndexScheduler {
 
         Ok(tasks)
     }
+
+    fn process_snapshot_with_pipe(
+        &self,
+        progress: Progress,
+        mut tasks: Vec<Task>,
+    ) -> Result<Vec<Task>> {
+        progress.update_progress(SnapshotCreationProgress::StartTheSnapshotCreation);
+        let must_stop_processing = &self.scheduler.must_stop_processing;
+        let abort_no_index = Err(Error::from_milli(InternalError::AbortedIndexation.into(), None));
+
+        fs::create_dir_all(&self.scheduler.snapshots_path)?;
+
+        // 1. Find the base path and original name of the database
+        // TODO find a better way to get this path
+        let mut base_path = self.env.path().to_owned();
+        base_path.pop();
+        let base_path = base_path;
+        let db_name = base_path.file_name().and_then(OsStr::to_str).unwrap_or("data.ms");
+
+        // 2. Start the tarball builder. The tarball is staged in a temp file next to its destination.
+        let mut builder = compression::PipedArchiveBuilder::new(
+            self.scheduler.snapshots_path.clone(),
+            base_path.clone(),
+        )?;
+
+        // 3. Snapshot the VERSION file
+        builder.add_file_to_archive(self.scheduler.version_file_path.clone())?;
+        if must_stop_processing.get() {
+            return abort_no_index;
+        }
+
+        // 4. Snapshot the index-scheduler LMDB env
+        //
+        // When we copy the env, we cannot provide our own read transaction.
+        // This is an issue because we would like to know which update files to
+        // copy, but new ones can be enqueued between the copy of the env and
+        // the new transaction we open to retrieve the enqueued tasks. So we
+        // prefer opening the new transaction after copying the env, and copying
+        // too many update files rather than not enough.
+        //
+        // Note that no update file can be deleted between those two read
+        // operations, as task processing is synchronous.
+
+        // 4.1 First copy the LMDB env of the index-scheduler
+        progress.update_progress(SnapshotCreationProgress::SnapshotTheIndexScheduler);
+        builder.add_env_to_archive(&self.env)?;
+        if must_stop_processing.get() {
+            return abort_no_index;
+        }
+
+        // 4.2 Create a read transaction on the index-scheduler
+        let rtxn = self.env.read_txn()?;
+
+        // 4.3 Only copy the update files of the enqueued tasks
+        progress.update_progress(SnapshotCreationProgress::SnapshotTheUpdateFiles);
+        builder.add_dir_to_archive(self.queue.file_store.path().to_path_buf())?;
+        let enqueued = self.queue.tasks.get_status(&rtxn, Status::Enqueued)?;
+        let (atomic, update_file_progress) = AtomicUpdateFileStep::new(enqueued.len() as u32);
+        progress.update_progress(update_file_progress);
+        for task_id in enqueued {
+            if must_stop_processing.get() {
+                return abort_no_index;
+            }
+            let task =
+                self.queue.tasks.get_task(&rtxn, task_id)?.ok_or(Error::CorruptedTaskQueue)?;
+            if let Some(content_uuid) = task.content_uuid() {
+                let src = self.queue.file_store.get_update_path(content_uuid);
+                builder.add_file_to_archive(src)?;
+            }
+            atomic.fetch_add(1, Ordering::Relaxed);
+        }
+
+        // 5. Snapshot every index
+        progress.update_progress(SnapshotCreationProgress::SnapshotTheIndexes);
+        builder.add_dir_to_archive(self.index_mapper.base_path().to_path_buf())?;
+        let index_mapping = self.index_mapper.index_mapping;
+        let nb_indexes = index_mapping.len(&rtxn)? as u32;
+
+        for (i, result) in index_mapping.iter(&rtxn)?.enumerate() {
+            let (name, _) = result?;
+
+            let abort_index = || {
+                Err(Error::from_milli(
+                    InternalError::AbortedIndexation.into(),
+                    Some(name.to_string()), // defer the `to_string`
+                ))
+            };
+
+            if must_stop_processing.get() {
+                return abort_index();
+            }
+
+            progress.update_progress(VariableNameStep::<SnapshotCreationProgress>::new(
+                name, i as u32, nb_indexes,
+            ));
+            let index = self.index_mapper.index(&rtxn, name)?;
+            builder.add_env_to_archive(index.raw_env())?;
+        }
+
+        drop(rtxn);
+
+        if must_stop_processing.get() {
+            return abort_no_index;
+        }
+
+        // 6. Snapshot the auth LMDB env
+        progress.update_progress(SnapshotCreationProgress::SnapshotTheApiKeys);
+        builder.add_env_to_archive(&self.scheduler.auth_env)?;
+
+        // 7. Finalize the tarball
+        progress.update_progress(SnapshotCreationProgress::CreateTheTarball);
+        let file =
+            builder.finish(&self.scheduler.snapshots_path.join(format!("{db_name}.snapshot")))?;
+
+        // 8. Change the permission to make the snapshot readonly
+        let mut permissions = file.metadata()?.permissions();
+        permissions.set_readonly(true);
+        #[cfg(unix)]
+        {
+            use std::os::unix::fs::PermissionsExt;
+            #[allow(clippy::non_octal_unix_permissions)]
+            // r--r--r--
+            permissions.set_mode(0b100100100);
+        }
+        file.set_permissions(permissions)?;
+
+        for task in &mut tasks {
+            task.status = Status::Succeeded;
+        }
+
+        Ok(tasks)
+    }
 }

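A side note on step 8 above: `0b100100100` is `0o444`, i.e. `r--r--r--`. Here is a stand-alone sketch of the same read-only dance, runnable outside Meilisearch (the file name is illustrative):

```rust
use std::fs::File;
use std::io::Write;

// Make a freshly written snapshot file read-only; on Unix this is 0o444
// (r--r--r--), the same value the diff spells as 0b100100100.
fn make_readonly(file: &File) -> std::io::Result<()> {
    let mut permissions = file.metadata()?.permissions();
    permissions.set_readonly(true);
    #[cfg(unix)]
    {
        use std::os::unix::fs::PermissionsExt;
        permissions.set_mode(0o444);
    }
    file.set_permissions(permissions)
}

fn main() -> std::io::Result<()> {
    let mut file = File::create("example.snapshot")?;
    file.write_all(b"snapshot bytes")?;
    make_readonly(&file)
}
```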
View File

@@ -1,11 +1,13 @@
 use std::fs::{create_dir_all, File};
 use std::io::Write;
-use std::path::Path;
+use std::path::{Path, PathBuf};
 
 use flate2::read::GzDecoder;
 use flate2::write::GzEncoder;
 use flate2::Compression;
+use milli::heed::Env;
 use tar::{Archive, Builder};
+use tempfile::NamedTempFile;
 
 pub fn to_tar_gz(src: impl AsRef<Path>, dest: impl AsRef<Path>) -> anyhow::Result<()> {
     let mut f = File::create(dest)?;
@@ -26,3 +28,112 @@ pub fn from_tar_gz(src: impl AsRef<Path>, dest: impl AsRef<Path>) -> anyhow::Res
     ar.unpack(&dest)?;
     Ok(())
 }
+
+pub struct PipedArchiveBuilder {
+    base_path: PathBuf,
+    tar_encoder: tar::Builder<GzEncoder<NamedTempFile>>,
+}
+
+impl PipedArchiveBuilder {
+    pub fn new(dest_dir: PathBuf, base_path: PathBuf) -> anyhow::Result<Self> {
+        let temp_archive = tempfile::NamedTempFile::new_in(&dest_dir)?;
+        let gz_encoder = GzEncoder::new(temp_archive, Compression::default());
+        let mut tar_encoder = Builder::new(gz_encoder);
+        let base_path_in_archive = PathInArchive::from_absolute_and_base(&base_path, &base_path);
+        tar_encoder.append_dir(base_path_in_archive.as_path(), &base_path)?;
+        Ok(Self { base_path, tar_encoder })
+    }
+
+    /// Add a heed environment to the archive.
+    ///
+    /// A write transaction is held for the duration of the copy so that the
+    /// environment cannot change while it is being added to the archive.
+    ///
+    /// # Errors
+    ///
+    /// - Heed errors, if taking the write transaction fails.
+    /// - I/O errors, if appending the environment's directory or its `data.mdb`
+    ///   file to the archive fails.
+    pub fn add_env_to_archive<T>(&mut self, env: &Env<T>) -> anyhow::Result<()> {
+        let path = env.path().to_path_buf();
+        // Make sure that the environment cannot change while it is being added
+        // to the archive, as any concurrent change would corrupt the copy.
+        let env_wtxn = env.write_txn()?;
+
+        let dir_path_in_archive = PathInArchive::from_absolute_and_base(&path, &self.base_path);
+        self.tar_encoder.append_dir(dir_path_in_archive.as_path(), &path)?;
+
+        let path = path.join("data.mdb");
+        let path_in_archive = PathInArchive::from_absolute_and_base(&path, &self.base_path);
+        self.tar_encoder.append_path_with_name(&path, path_in_archive.as_path())?;
+
+        // no change we might want to commit
+        env_wtxn.abort();
+        Ok(())
+    }
+
+    /// Add a file to the archive.
+    ///
+    /// # Errors
+    ///
+    /// - I/O errors, if reading the file or appending it to the archive fails.
+    pub fn add_file_to_archive(&mut self, path: PathBuf) -> anyhow::Result<()> {
+        let path_in_archive = PathInArchive::from_absolute_and_base(&path, &self.base_path);
+        self.tar_encoder.append_path_with_name(&path, path_in_archive.as_path())?;
+        Ok(())
+    }
+
+    /// Add a directory name (**without its contents**) to the archive.
+    ///
+    /// # Errors
+    ///
+    /// - I/O errors, if appending the directory entry to the archive fails.
+    pub fn add_dir_to_archive(&mut self, path: PathBuf) -> anyhow::Result<()> {
+        let path_in_archive = PathInArchive::from_absolute_and_base(&path, &self.base_path);
+        self.tar_encoder.append_dir(path_in_archive.as_path(), &path)?;
+        Ok(())
+    }
+
+    /// Finalize the archive and persist it to disk.
+    ///
+    /// # Errors
+    ///
+    /// - I/O errors, if finishing the tar or gzip stream fails.
+    /// - If persisting the temporary archive to `dest_path` fails.
+    pub fn finish(self, dest_path: &Path) -> anyhow::Result<File> {
+        let gz_encoder = self.tar_encoder.into_inner()?;
+        let mut temp_archive = gz_encoder.finish()?;
+        temp_archive.flush()?;
+        let archive = temp_archive.persist(dest_path)?;
+        Ok(archive)
+    }
+}
+
+struct PathInArchive(PathBuf);
+
+impl PathInArchive {
+    pub fn from_absolute_and_base(absolute: &Path, base: &Path) -> Self {
+        // FIXME: avoid this unwrap
+        let canonical = absolute.canonicalize().unwrap();
+        let relative = match canonical.strip_prefix(base) {
+            Ok(stripped) => Path::new(&".").join(stripped),
+            Err(_) => absolute.to_path_buf(),
+        };
+        Self(relative)
+    }
+
+    pub fn as_path(&self) -> &Path {
+        self.0.as_path()
+    }
+}

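To make the intended call pattern concrete, here is a hedged usage sketch of `PipedArchiveBuilder` as defined above (it assumes the type is in scope; the paths are illustrative). The archive is staged in a `NamedTempFile` inside the destination directory, so `finish` can persist it atomically on the same filesystem:

```rust
use std::path::PathBuf;

fn build_snapshot() -> anyhow::Result<()> {
    // Illustrative paths: the database directory and the snapshots directory.
    let base_path = PathBuf::from("/tmp/data.ms");
    let snapshots_dir = PathBuf::from("/tmp/snapshots");
    std::fs::create_dir_all(&snapshots_dir)?;

    // The archive is staged as a temp file inside `snapshots_dir`.
    let mut builder = PipedArchiveBuilder::new(snapshots_dir.clone(), base_path.clone())?;

    builder.add_file_to_archive(base_path.join("VERSION"))?;
    builder.add_dir_to_archive(base_path.join("update_files"))?;
    // ... LMDB environments go through `add_env_to_archive` ...

    // Only `finish` gives the archive its final, visible name.
    let _snapshot: std::fs::File =
        builder.finish(&snapshots_dir.join("data.ms.snapshot"))?;
    Ok(())
}
```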
View File

@@ -1983,6 +1983,11 @@ impl Index {
         Ok(sizes)
     }
+
+    /// The underlying env for raw access
+    pub fn raw_env(&self) -> &heed::Env<WithoutTls> {
+        &self.env
+    }
 }
 
 pub struct EmbeddingsWithMetadata {
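A hedged sketch of why this accessor exists (assuming `Index` and the `PipedArchiveBuilder` from this compare are in scope): it lets snapshot code hand an index's LMDB env directly to the archive builder.

```rust
// The accessor exposes the index's heed env so snapshotting can archive it
// without going through `copy_to_path` (and thus without compaction).
fn snapshot_index(index: &Index, builder: &mut PipedArchiveBuilder) -> anyhow::Result<()> {
    builder.add_env_to_archive(index.raw_env())?;
    Ok(())
}
```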