@@ -211,7 +211,7 @@ def __init__(
         The parameters `batch_size` and `num_workers` are only
         relevant if this code runs on a CPU. Both can be set
         automatically via the function
-        `set_opt_num_workers_and_batch_size_fast`. Enabling
+        `prepare_parallel`. Enabling
         calculations via PyTorch is only recommended with GPU
         support. CPU calculations are faster with our NumPy
         implementation.
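A minimal usage sketch of the automatic setup described in the docstring above. Only `prepare_parallel`, `batch_size` and `num_workers` appear in this diff; the `GDMLPredict` class name, the `sgdml.predict` import path and the model file name are assumptions:

    import numpy as np
    from sgdml.predict import GDMLPredict  # assumed import path

    model = np.load('model.npz', allow_pickle=True)  # hypothetical trained model file
    gdml = GDMLPredict(model)  # CPU path, NumPy implementation

    # Instead of passing `batch_size` and `num_workers` by hand,
    # let the predictor benchmark and set both automatically:
    gdml.prepare_parallel(n_bulk=100)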
@@ -354,7 +354,7 @@ def _set_num_workers(
         Note
         ----
         This parameter can be optimally determined using
-        `set_opt_num_workers_and_batch_size_fast`.
+        `prepare_parallel`.
 
         Parameters
         ----------
@@ -414,7 +414,7 @@ def _set_batch_size(
         Note
         ----
         This parameter can be optimally determined using
-        `set_opt_num_workers_and_batch_size_fast`.
+        `prepare_parallel`.
 
         Parameters
         ----------
@@ -441,6 +441,8 @@ def _set_bulk_mp(
 
     def set_opt_num_workers_and_batch_size_fast(self, n_bulk=1, n_reps=1):  # deprecated
         """
+        Warning
+        -------
         Deprecated! Please use the function `prepare_parallel` in future projects.
 
         Parameters
@@ -497,7 +499,11 @@ def prepare_parallel(self, n_bulk=1, n_reps=1, return_is_from_cache=False): # n
         n_reps : int, optional
             Number of repetitions (bigger value: more
             accurate, but also slower).
-
+        return_is_from_cache : bool, optional
+            If enabled, this function returns a second value
+            indicating if the returned results were obtained
+            from cache.
+
         Returns
         -------
         int
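A hedged sketch of the flag documented above, reusing the `gdml` instance from the earlier sketch. Per the docstring, the first return value is an int; what it measures is truncated in this diff, so the variable name below is generic:

    # With `return_is_from_cache=True`, a second value is returned, telling us
    # whether the result came from cache instead of a fresh benchmark run.
    n_opt, is_from_cache = gdml.prepare_parallel(
        n_bulk=100, n_reps=3, return_is_from_cache=True
    )
    if not is_from_cache:
        print('Benchmarked from scratch: {}'.format(n_opt))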
@@ -883,4 +889,3 @@ def predict(self, r):
         F = desc.r_to_d_desc_op(r, pdist, res[1:], self.ucell_size).reshape(1, -1)
         # F = res[1:].reshape(1,-1).dot(r_d_desc)
         return E, F
-
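For context, a hedged sketch of calling the `predict` method touched by the last hunk. The shape convention for `r` (flattened Cartesian coordinates of a single geometry) is an assumption; only the `(E, F)` return pair is visible in the diff:

    r = some_geometry.reshape(1, -1)  # hypothetical array of Cartesian coordinates
    E, F = gdml.predict(r)            # energy and forces for this geometry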