@@ -148,7 +148,7 @@ def _init_kernel_operator(
         n_train = R_desc.shape[0]

         # dummy alphas
-        v_F = np.zeros((n - n_train, 1)) if task['use_E_cstr'] else np.zeros((n, 1))
+        v_F = np.zeros((n - n_train, 1)) if task['use_E_cstr'] else np.zeros((n, 1))
         v_E = np.zeros((n_train, 1)) if task['use_E_cstr'] else None

         # Note: The standard deviation is set to 1.0, because we are predicting normalized labels here.
@@ -372,12 +372,16 @@ def _lev_scores(
         dim_m = dim_i * min(n_inducing_pts, 10)

         # Which columns to use for leverage score approximation?
-        lev_approx_idxs = np.sort(np.random.choice(n_train * dim_i + (n_train if use_E_cstr else 0), dim_m, replace=False))  # random subset of columns
-        #lev_approx_idxs = np.sort(np.random.choice(n_train*dim_i, dim_m, replace=False)) # random subset of columns
-
-        #lev_approx_idxs = np.s_[
+        lev_approx_idxs = np.sort(
+            np.random.choice(
+                n_train * dim_i + (n_train if use_E_cstr else 0), dim_m, replace=False
+            )
+        )  # random subset of columns
+        # lev_approx_idxs = np.sort(np.random.choice(n_train*dim_i, dim_m, replace=False)) # random subset of columns
+
+        # lev_approx_idxs = np.s_[
         # :dim_m
-        #] # first 'dim_m' columns (faster kernel construction)
+        # ] # first 'dim_m' columns (faster kernel construction)

         L_inv_K_mn = self._nystroem_cholesky_factor(
             R_desc,
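Aside (not part of the commit): the reformatted block above draws a random subset of dim_m kernel columns (one per force component, plus one per training point when energy constraints are active) so that leverage scores can be approximated without building the full kernel. A minimal standalone sketch of that selection, under the shapes the diff implies; the helper name and signature are hypothetical:

import numpy as np

def pick_leverage_columns(n_train, dim_i, n_inducing_pts, use_E_cstr, rng=np.random):
    # Hypothetical helper, only mirroring the column draw in the hunk above.
    dim_m = dim_i * min(n_inducing_pts, 10)  # number of probe columns
    n_cols = n_train * dim_i + (n_train if use_E_cstr else 0)  # total kernel columns
    return np.sort(rng.choice(n_cols, dim_m, replace=False))  # sorted random subset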
@@ -460,7 +464,7 @@ def _cho_factor_stable(self, M, pre_reg=False, eps_mag_max=1):

         self.log.critical(
             'Failed to factorize despite strong regularization (max: {})!\nYou could try a larger sigma.'.format(
-                10.0 ** eps_mag_max
+                10.0**eps_mag_max
             )
         )
         print()
@@ -492,7 +496,7 @@ def solve(
         num_iters0 = task['solver_iters'] if 'solver_iters' in task else 0

         # Number of inducing points to use for Nystrom approximation.
-        max_memory_bytes = self._max_memory * 1024 ** 3
+        max_memory_bytes = self._max_memory * 1024**3
         max_n_inducing_pts = Iterative.max_n_inducing_pts(
             n_train, n_atoms, max_memory_bytes
         )
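Side note: the 1024**3 factor suggests self._max_memory holds a budget in GiB. A quick arithmetic check with a made-up 8 GiB value:

max_memory_gib = 8                            # hypothetical budget
max_memory_bytes = max_memory_gib * 1024**3   # GiB -> bytes
print(max_memory_bytes)                       # 8589934592 bytes passed to max_n_inducing_pts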
@@ -849,13 +853,12 @@ def max_n_inducing_pts(n_train, n_atoms, max_memory_bytes):
         ny_factor = SQUARE_FACT * to_dof

         n_inducing_pts = (
-            np.sqrt(sq_factor ** 2 + 4.0 * ny_factor * max_memory_bytes) - sq_factor
+            np.sqrt(sq_factor**2 + 4.0 * ny_factor * max_memory_bytes) - sq_factor
         ) / (2 * ny_factor)
         n_inducing_pts = int(n_inducing_pts)

         return min(n_inducing_pts, n_train)

-
     @staticmethod
     def est_memory_requirement(n_train, n_inducing_pts, n_atoms):

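For context on the closed form above: if, as the coefficients suggest, memory use grows roughly as sq_factor * m + ny_factor * m**2 bytes in the number of inducing points m, then equating that to max_memory_bytes and taking the positive root of the quadratic gives exactly the expression in the hunk. A small self-contained check with made-up coefficients (the real ones are built from n_train, n_atoms, etc.):

import numpy as np

sq_factor, ny_factor = 5.0e5, 2.0e3   # hypothetical stand-ins for the diff's coefficients
max_memory_bytes = 8 * 1024**3        # hypothetical 8 GiB budget

# Positive root of: ny_factor * m**2 + sq_factor * m - max_memory_bytes = 0
m = (np.sqrt(sq_factor**2 + 4.0 * ny_factor * max_memory_bytes) - sq_factor) / (
    2 * ny_factor
)

# Plugging m back in should reproduce the budget (up to floating-point error).
assert np.isclose(ny_factor * m**2 + sq_factor * m, max_memory_bytes)
print(int(m), 'inducing points fit the hypothetical budget')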