Mirror of https://github.com/meilisearch/meilisearch.git
Remove lots of Arcs
@@ -1,5 +1,4 @@
 use std::collections::BTreeMap;
-use std::sync::Arc;
 use std::time::Instant;
 
 use deserr::Deserr;
@@ -170,7 +169,7 @@ impl Embedder {
         &self,
         texts: Vec<String>,
         deadline: Option<Instant>,
-        embedder_stats: Option<Arc<EmbedderStats>>,
+        embedder_stats: Option<&EmbedderStats>,
     ) -> Result<Vec<Embedding>, EmbedError> {
         embed(
             &self.data,
@@ -186,7 +185,7 @@ impl Embedder {
         &self,
         texts: &[S],
         deadline: Option<Instant>,
-        embedder_stats: Option<Arc<EmbedderStats>>,
+        embedder_stats: Option<&EmbedderStats>,
     ) -> Result<Vec<Embedding>, EmbedError>
     where
         S: AsRef<str> + Serialize,
@@ -208,21 +207,21 @@ impl Embedder {
         &self,
         text_chunks: Vec<Vec<String>>,
         threads: &ThreadPoolNoAbort,
-        embedder_stats: Arc<EmbedderStats>,
+        embedder_stats: &EmbedderStats,
     ) -> Result<Vec<Vec<Embedding>>, EmbedError> {
         // This condition helps reduce the number of active rayon jobs
         // so that we avoid consuming all the LMDB rtxns and avoid stack overflows.
         if threads.active_operations() >= REQUEST_PARALLELISM {
             text_chunks
                 .into_iter()
-                .map(move |chunk| self.embed(chunk, None, Some(embedder_stats.clone())))
+                .map(move |chunk| self.embed(chunk, None, Some(embedder_stats)))
                 .collect()
         } else {
             threads
                 .install(move || {
                     text_chunks
                         .into_par_iter()
-                        .map(move |chunk| self.embed(chunk, None, Some(embedder_stats.clone())))
+                        .map(move |chunk| self.embed(chunk, None, Some(embedder_stats)))
                         .collect()
                 })
                 .map_err(|error| EmbedError {
@@ -236,14 +235,14 @@ impl Embedder {
         &self,
         texts: &[&str],
         threads: &ThreadPoolNoAbort,
-        embedder_stats: Arc<EmbedderStats>,
+        embedder_stats: &EmbedderStats,
     ) -> Result<Vec<Embedding>, EmbedError> {
         // This condition helps reduce the number of active rayon jobs
         // so that we avoid consuming all the LMDB rtxns and avoid stack overflows.
         if threads.active_operations() >= REQUEST_PARALLELISM {
             let embeddings: Result<Vec<Vec<Embedding>>, _> = texts
                 .chunks(self.prompt_count_in_chunk_hint())
-                .map(move |chunk| self.embed_ref(chunk, None, Some(embedder_stats.clone())))
+                .map(move |chunk| self.embed_ref(chunk, None, Some(embedder_stats)))
                 .collect();
 
             let embeddings = embeddings?;
@@ -253,7 +252,7 @@ impl Embedder {
                 .install(move || {
                     let embeddings: Result<Vec<Vec<Embedding>>, _> = texts
                         .par_chunks(self.prompt_count_in_chunk_hint())
-                        .map(move |chunk| self.embed_ref(chunk, None, Some(embedder_stats.clone())))
+                        .map(move |chunk| self.embed_ref(chunk, None, Some(embedder_stats)))
                         .collect();
 
                     let embeddings = embeddings?;
@@ -303,7 +302,7 @@ fn embed<S>(
     expected_count: usize,
     expected_dimension: Option<usize>,
     deadline: Option<Instant>,
-    embedder_stats: Option<Arc<EmbedderStats>>,
+    embedder_stats: Option<&EmbedderStats>,
 ) -> Result<Vec<Embedding>, EmbedError>
 where
     S: Serialize,
@@ -323,7 +322,7 @@ where
 
     for attempt in 0..10 {
         if let Some(embedder_stats) = &embedder_stats {
-            embedder_stats.as_ref().total_count.fetch_add(1, std::sync::atomic::Ordering::Relaxed);
+            embedder_stats.total_count.fetch_add(1, std::sync::atomic::Ordering::Relaxed);
         }
         let response = request.clone().send_json(&body);
         let result = check_response(response, data.configuration_source).and_then(|response| {
@@ -367,7 +366,7 @@ where
     }
 
     if let Some(embedder_stats) = &embedder_stats {
-        embedder_stats.as_ref().total_count.fetch_add(1, std::sync::atomic::Ordering::Relaxed);
+        embedder_stats.total_count.fetch_add(1, std::sync::atomic::Ordering::Relaxed);
     }
     let response = request.send_json(&body);
     let result = check_response(response, data.configuration_source).and_then(|response| {
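The pattern this commit applies, shown below as a minimal, self-contained sketch (the trimmed-down EmbedderStats and the main function are illustrative stand-ins, not the Meilisearch definitions): when a callee only reads shared state, it can accept a plain &EmbedderStats instead of an owned Arc<EmbedderStats>, so call sites stop paying for an Arc::clone per chunk.

use std::sync::atomic::{AtomicUsize, Ordering};

// Illustrative stand-in: only the atomic counter touched in this diff is modeled.
struct EmbedderStats {
    total_count: AtomicUsize,
}

// Before: `embedder_stats: Option<Arc<EmbedderStats>>` forced an Arc::clone at
// every call site. After: a shared reference is enough, because the function
// only increments an atomic counter and never stores the stats anywhere.
fn embed(embedder_stats: Option<&EmbedderStats>) {
    if let Some(embedder_stats) = embedder_stats {
        embedder_stats.total_count.fetch_add(1, Ordering::Relaxed);
    }
}

fn main() {
    let stats = EmbedderStats { total_count: AtomicUsize::new(0) };
    // The same borrow is handed to every call; no reference counting involved.
    for _ in 0..3 {
        embed(Some(&stats));
    }
    assert_eq!(stats.total_count.load(Ordering::Relaxed), 3);
}

This is also what simplifies the rayon closures in the diff: a shared reference is Copy, so the move closures can reuse the same borrow for every chunk without the embedder_stats.clone() calls, and because the sketched struct only holds an atomic, &EmbedderStats can safely be used from parallel workers.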