@@ -207,6 +207,10 @@ func Run(ctx context.Context, client *databricks.WorkspaceClient, opts ClientOpt
207207 return errors .New ("either --cluster or --name must be provided" )
208208 }
209209
210+ if ! opts .ProxyMode {
211+ cmdio .LogString (ctx , fmt .Sprintf ("Connecting to %s..." , sessionID ))
212+ }
213+
210214 if opts .IDE != "" && ! opts .ProxyMode {
211215 if err := vscode .CheckIDECommand (opts .IDE ); err != nil {
212216 return err
@@ -238,6 +242,7 @@ func Run(ctx context.Context, client *databricks.WorkspaceClient, opts ClientOpt
238242
239243 // Only check cluster state for dedicated clusters
240244 if ! opts .IsServerlessMode () {
245+ cmdio .LogString (ctx , "Checking cluster state..." )
241246 err := checkClusterState (ctx , client , opts .ClusterID , opts .AutoStartCluster )
242247 if err != nil {
243248 return err
@@ -263,8 +268,8 @@ func Run(ctx context.Context, client *databricks.WorkspaceClient, opts ClientOpt
263268 if err != nil {
264269 return fmt .Errorf ("failed to save SSH key pair locally: %w" , err )
265270 }
266- cmdio . LogString (ctx , "Using SSH key: " + keyPath )
267- cmdio . LogString (ctx , fmt . Sprintf ( "Secrets scope: %s, key name: %s" , secretScopeName , opts .ClientPublicKeyName ) )
271+ log . Infof (ctx , "Using SSH key: %s" , keyPath )
272+ log . Infof (ctx , "Secrets scope: %s, key name: %s" , secretScopeName , opts .ClientPublicKeyName )
268273
269274 var userName string
270275 var serverPort int
@@ -273,8 +278,12 @@ func Run(ctx context.Context, client *databricks.WorkspaceClient, opts ClientOpt
273278 version := build .GetInfo ().Version
274279
275280 if opts .ServerMetadata == "" {
276- cmdio .LogString (ctx , "Checking for ssh-tunnel binaries to upload..." )
277- if err := UploadTunnelReleases (ctx , client , version , opts .ReleasesDir ); err != nil {
281+ cmdio .LogString (ctx , "Uploading binaries..." )
282+ sp := cmdio .NewSpinner (ctx )
283+ sp .Update ("Uploading binaries..." )
284+ err := UploadTunnelReleases (ctx , client , version , opts .ReleasesDir )
285+ sp .Close ()
286+ if err != nil {
278287 return fmt .Errorf ("failed to upload ssh-tunnel binaries: %w" , err )
279288 }
280289 userName , serverPort , clusterID , err = ensureSSHServerIsRunning (ctx , client , version , secretScopeName , opts )
@@ -307,18 +316,22 @@ func Run(ctx context.Context, client *databricks.WorkspaceClient, opts ClientOpt
307316 return errors .New ("cluster ID is required for serverless connections but was not found in metadata" )
308317 }
309318
310- cmdio . LogString (ctx , "Remote user name: " + userName )
311- cmdio . LogString (ctx , fmt . Sprintf ( "Server port: %d" , serverPort ) )
319+ log . Infof (ctx , "Remote user name: %s" , userName )
320+ log . Infof (ctx , "Server port: %d" , serverPort )
312321 if opts .IsServerlessMode () {
313- cmdio .LogString (ctx , "Cluster ID (from serverless job): " + clusterID )
322+ log .Infof (ctx , "Cluster ID (from serverless job): %s" , clusterID )
323+ }
324+
325+ if ! opts .ProxyMode {
326+ cmdio .LogString (ctx , "Connected!" )
314327 }
315328
316329 if opts .ProxyMode {
317330 return runSSHProxy (ctx , client , serverPort , clusterID , opts )
318331 } else if opts .IDE != "" {
319332 return runIDE (ctx , client , userName , keyPath , serverPort , clusterID , opts )
320333 } else {
321- cmdio . LogString (ctx , fmt . Sprintf ( "Additional SSH arguments: %v" , opts .AdditionalArgs ) )
334+ log . Infof (ctx , "Additional SSH arguments: %v" , opts .AdditionalArgs )
322335 return spawnSSHClient (ctx , userName , keyPath , serverPort , clusterID , opts )
323336 }
324337}
@@ -372,7 +385,7 @@ func ensureSSHConfigEntry(ctx context.Context, configPath, hostName, userName, k
372385 return err
373386 }
374387
375- cmdio . LogString (ctx , fmt . Sprintf ( "Updated SSH config entry for '%s'" , hostName ) )
388+ log . Infof (ctx , "Updated SSH config entry for '%s'" , hostName )
376389 return nil
377390}
378391
@@ -471,7 +484,7 @@ func submitSSHTunnelJob(ctx context.Context, client *databricks.WorkspaceClient,
471484 "serverless" : strconv .FormatBool (opts .IsServerlessMode ()),
472485 }
473486
474- cmdio . LogString (ctx , "Submitting a job to start the ssh server..." )
487+ log . Infof (ctx , "Submitting a job to start the ssh server..." )
475488
476489 task := jobs.SubmitTask {
477490 TaskKey : sshServerTaskKey ,
@@ -485,7 +498,7 @@ func submitSSHTunnelJob(ctx context.Context, client *databricks.WorkspaceClient,
485498 if opts .IsServerlessMode () {
486499 task .EnvironmentKey = serverlessEnvironmentKey
487500 if opts .Accelerator != "" {
488- cmdio . LogString (ctx , "Using accelerator: " + opts .Accelerator )
501+ log . Infof (ctx , "Using accelerator: %s" , opts .Accelerator )
489502 task .Compute = & jobs.Compute {
490503 HardwareAccelerator : compute .HardwareAcceleratorType (opts .Accelerator ),
491504 }
@@ -568,14 +581,16 @@ func runSSHProxy(ctx context.Context, client *databricks.WorkspaceClient, server
568581}
569582
570583func checkClusterState (ctx context.Context , client * databricks.WorkspaceClient , clusterID string , autoStart bool ) error {
584+ sp := cmdio .NewSpinner (ctx )
585+ defer sp .Close ()
571586 if autoStart {
572- cmdio . LogString ( ctx , "Ensuring the cluster is running: " + clusterID )
587+ sp . Update ( "Ensuring the cluster is running..." )
573588 err := client .Clusters .EnsureClusterIsRunning (ctx , clusterID )
574589 if err != nil {
575590 return fmt .Errorf ("failed to ensure that the cluster is running: %w" , err )
576591 }
577592 } else {
578- cmdio . LogString ( ctx , "Checking cluster state: " + clusterID )
593+ sp . Update ( "Checking cluster state..." )
579594 cluster , err := client .Clusters .GetByClusterId (ctx , clusterID )
580595 if err != nil {
581596 return fmt .Errorf ("failed to get cluster info: %w" , err )
@@ -590,7 +605,9 @@ func checkClusterState(ctx context.Context, client *databricks.WorkspaceClient,
590605// waitForJobToStart polls the task status until the SSH server task is in RUNNING state or terminates.
591606// Returns an error if the task fails to start or if polling times out.
592607func waitForJobToStart (ctx context.Context , client * databricks.WorkspaceClient , runID int64 , taskStartupTimeout time.Duration ) error {
593- cmdio .LogString (ctx , "Waiting for the SSH server task to start..." )
608+ sp := cmdio .NewSpinner (ctx )
609+ defer sp .Close ()
610+ sp .Update ("Starting SSH server..." )
594611 var prevState jobs.RunLifecycleStateV2State
595612
596613 _ , err := retries .Poll (ctx , taskStartupTimeout , func () (* jobs.RunTask , * retries.Err ) {
@@ -620,15 +637,14 @@ func waitForJobToStart(ctx context.Context, client *databricks.WorkspaceClient,
620637
621638 currentState := sshTask .Status .State
622639
623- // Print status if it changed
640+ // Update spinner if state changed
624641 if currentState != prevState {
625- cmdio . LogString ( ctx , fmt .Sprintf ("Task status : %s" , currentState ))
642+ sp . Update ( fmt .Sprintf ("Starting SSH server... (task : %s) " , currentState ))
626643 prevState = currentState
627644 }
628645
629646 // Check if task is running
630647 if currentState == jobs .RunLifecycleStateV2StateRunning {
631- cmdio .LogString (ctx , "SSH server task is now running, proceeding to connect..." )
632648 return sshTask , nil
633649 }
634650
@@ -651,22 +667,23 @@ func ensureSSHServerIsRunning(ctx context.Context, client *databricks.WorkspaceC
651667
652668 serverPort , userName , effectiveClusterID , err := getServerMetadata (ctx , client , sessionID , clusterID , version , opts .Liteswap )
653669 if errors .Is (err , errServerMetadata ) {
654- cmdio .LogString (ctx , "SSH server is not running, starting it now ..." )
670+ cmdio .LogString (ctx , "Starting SSH server..." )
655671
656672 err := submitSSHTunnelJob (ctx , client , version , secretScopeName , opts )
657673 if err != nil {
658674 return "" , 0 , "" , fmt .Errorf ("failed to submit and start ssh server job: %w" , err )
659675 }
660676
661- cmdio .LogString (ctx , "Waiting for the ssh server to start..." )
677+ sp := cmdio .NewSpinner (ctx )
678+ defer sp .Close ()
679+ sp .Update ("Waiting for the SSH server to start..." )
662680 maxRetries := 30
663681 for retries := range maxRetries {
664682 if ctx .Err () != nil {
665683 return "" , 0 , "" , ctx .Err ()
666684 }
667685 serverPort , userName , effectiveClusterID , err = getServerMetadata (ctx , client , sessionID , clusterID , version , opts .Liteswap )
668686 if err == nil {
669- cmdio .LogString (ctx , "Health check successful, starting ssh WebSocket connection..." )
670687 break
671688 } else if retries < maxRetries - 1 {
672689 time .Sleep (2 * time .Second )