Skip to content

Commit 9df759a

Browse files
anton-107 and claude authored
Hide verbose SSH connect output, add spinners (#4694)
## Summary

- Move detailed diagnostic messages (SSH key paths, secrets scope, remote user/port, job submission details, upload progress) from `cmdio.LogString` to `log.Infof` so they only appear with `--log-level=info`
- Add spinners for long-running operations: binary upload, cluster state check, job startup wait, and metadata polling
- Keep concise user-facing step messages (`Connecting to <id>...`, `Uploading binaries...`, `Starting SSH server...`, `Connected!`) for progress visibility
- Keep job run ID visible to users (via `cmdio.LogString`) since it's useful for debugging

Resolves DECO-26523

## Test plan

- [x] Run `databricks ssh connect` and verify only concise step messages + spinners are shown
- [x] Run with `--log-level=info` and verify detailed messages appear
- [x] Verify spinners display and clear correctly for each long operation
- [ ] Test non-interactive terminal (piped output) — spinners should degrade gracefully

🤖 Generated with [Claude Code](https://claude.com/claude-code)

---------

Co-authored-by: Claude Opus 4.6 <noreply@anthropic.com>
1 parent 8c7ec3b commit 9df759a

File tree

5 files changed

+50
-32
lines changed

5 files changed

+50
-32
lines changed

experimental/ssh/internal/client/client.go

Lines changed: 37 additions & 20 deletions
Original file line numberDiff line numberDiff line change
@@ -207,6 +207,10 @@ func Run(ctx context.Context, client *databricks.WorkspaceClient, opts ClientOpt
207207
return errors.New("either --cluster or --name must be provided")
208208
}
209209

210+
if !opts.ProxyMode {
211+
cmdio.LogString(ctx, fmt.Sprintf("Connecting to %s...", sessionID))
212+
}
213+
210214
if opts.IDE != "" && !opts.ProxyMode {
211215
if err := vscode.CheckIDECommand(opts.IDE); err != nil {
212216
return err
@@ -238,6 +242,7 @@ func Run(ctx context.Context, client *databricks.WorkspaceClient, opts ClientOpt
238242

239243
// Only check cluster state for dedicated clusters
240244
if !opts.IsServerlessMode() {
245+
cmdio.LogString(ctx, "Checking cluster state...")
241246
err := checkClusterState(ctx, client, opts.ClusterID, opts.AutoStartCluster)
242247
if err != nil {
243248
return err
@@ -263,8 +268,8 @@ func Run(ctx context.Context, client *databricks.WorkspaceClient, opts ClientOpt
263268
if err != nil {
264269
return fmt.Errorf("failed to save SSH key pair locally: %w", err)
265270
}
266-
cmdio.LogString(ctx, "Using SSH key: "+keyPath)
267-
cmdio.LogString(ctx, fmt.Sprintf("Secrets scope: %s, key name: %s", secretScopeName, opts.ClientPublicKeyName))
271+
log.Infof(ctx, "Using SSH key: %s", keyPath)
272+
log.Infof(ctx, "Secrets scope: %s, key name: %s", secretScopeName, opts.ClientPublicKeyName)
268273

269274
var userName string
270275
var serverPort int
@@ -273,8 +278,12 @@ func Run(ctx context.Context, client *databricks.WorkspaceClient, opts ClientOpt
273278
version := build.GetInfo().Version
274279

275280
if opts.ServerMetadata == "" {
276-
cmdio.LogString(ctx, "Checking for ssh-tunnel binaries to upload...")
277-
if err := UploadTunnelReleases(ctx, client, version, opts.ReleasesDir); err != nil {
281+
cmdio.LogString(ctx, "Uploading binaries...")
282+
sp := cmdio.NewSpinner(ctx)
283+
sp.Update("Uploading binaries...")
284+
err := UploadTunnelReleases(ctx, client, version, opts.ReleasesDir)
285+
sp.Close()
286+
if err != nil {
278287
return fmt.Errorf("failed to upload ssh-tunnel binaries: %w", err)
279288
}
280289
userName, serverPort, clusterID, err = ensureSSHServerIsRunning(ctx, client, version, secretScopeName, opts)
@@ -307,18 +316,22 @@ func Run(ctx context.Context, client *databricks.WorkspaceClient, opts ClientOpt
307316
return errors.New("cluster ID is required for serverless connections but was not found in metadata")
308317
}
309318

310-
cmdio.LogString(ctx, "Remote user name: "+userName)
311-
cmdio.LogString(ctx, fmt.Sprintf("Server port: %d", serverPort))
319+
log.Infof(ctx, "Remote user name: %s", userName)
320+
log.Infof(ctx, "Server port: %d", serverPort)
312321
if opts.IsServerlessMode() {
313-
cmdio.LogString(ctx, "Cluster ID (from serverless job): "+clusterID)
322+
log.Infof(ctx, "Cluster ID (from serverless job): %s", clusterID)
323+
}
324+
325+
if !opts.ProxyMode {
326+
cmdio.LogString(ctx, "Connected!")
314327
}
315328

316329
if opts.ProxyMode {
317330
return runSSHProxy(ctx, client, serverPort, clusterID, opts)
318331
} else if opts.IDE != "" {
319332
return runIDE(ctx, client, userName, keyPath, serverPort, clusterID, opts)
320333
} else {
321-
cmdio.LogString(ctx, fmt.Sprintf("Additional SSH arguments: %v", opts.AdditionalArgs))
334+
log.Infof(ctx, "Additional SSH arguments: %v", opts.AdditionalArgs)
322335
return spawnSSHClient(ctx, userName, keyPath, serverPort, clusterID, opts)
323336
}
324337
}
@@ -372,7 +385,7 @@ func ensureSSHConfigEntry(ctx context.Context, configPath, hostName, userName, k
372385
return err
373386
}
374387

375-
cmdio.LogString(ctx, fmt.Sprintf("Updated SSH config entry for '%s'", hostName))
388+
log.Infof(ctx, "Updated SSH config entry for '%s'", hostName)
376389
return nil
377390
}
378391

@@ -471,7 +484,7 @@ func submitSSHTunnelJob(ctx context.Context, client *databricks.WorkspaceClient,
471484
"serverless": strconv.FormatBool(opts.IsServerlessMode()),
472485
}
473486

474-
cmdio.LogString(ctx, "Submitting a job to start the ssh server...")
487+
log.Infof(ctx, "Submitting a job to start the ssh server...")
475488

476489
task := jobs.SubmitTask{
477490
TaskKey: sshServerTaskKey,
@@ -485,7 +498,7 @@ func submitSSHTunnelJob(ctx context.Context, client *databricks.WorkspaceClient,
485498
if opts.IsServerlessMode() {
486499
task.EnvironmentKey = serverlessEnvironmentKey
487500
if opts.Accelerator != "" {
488-
cmdio.LogString(ctx, "Using accelerator: "+opts.Accelerator)
501+
log.Infof(ctx, "Using accelerator: %s", opts.Accelerator)
489502
task.Compute = &jobs.Compute{
490503
HardwareAccelerator: compute.HardwareAcceleratorType(opts.Accelerator),
491504
}
@@ -568,14 +581,16 @@ func runSSHProxy(ctx context.Context, client *databricks.WorkspaceClient, server
568581
}
569582

570583
func checkClusterState(ctx context.Context, client *databricks.WorkspaceClient, clusterID string, autoStart bool) error {
584+
sp := cmdio.NewSpinner(ctx)
585+
defer sp.Close()
571586
if autoStart {
572-
cmdio.LogString(ctx, "Ensuring the cluster is running: "+clusterID)
587+
sp.Update("Ensuring the cluster is running...")
573588
err := client.Clusters.EnsureClusterIsRunning(ctx, clusterID)
574589
if err != nil {
575590
return fmt.Errorf("failed to ensure that the cluster is running: %w", err)
576591
}
577592
} else {
578-
cmdio.LogString(ctx, "Checking cluster state: "+clusterID)
593+
sp.Update("Checking cluster state...")
579594
cluster, err := client.Clusters.GetByClusterId(ctx, clusterID)
580595
if err != nil {
581596
return fmt.Errorf("failed to get cluster info: %w", err)
@@ -590,7 +605,9 @@ func checkClusterState(ctx context.Context, client *databricks.WorkspaceClient,
590605
// waitForJobToStart polls the task status until the SSH server task is in RUNNING state or terminates.
591606
// Returns an error if the task fails to start or if polling times out.
592607
func waitForJobToStart(ctx context.Context, client *databricks.WorkspaceClient, runID int64, taskStartupTimeout time.Duration) error {
593-
cmdio.LogString(ctx, "Waiting for the SSH server task to start...")
608+
sp := cmdio.NewSpinner(ctx)
609+
defer sp.Close()
610+
sp.Update("Starting SSH server...")
594611
var prevState jobs.RunLifecycleStateV2State
595612

596613
_, err := retries.Poll(ctx, taskStartupTimeout, func() (*jobs.RunTask, *retries.Err) {
@@ -620,15 +637,14 @@ func waitForJobToStart(ctx context.Context, client *databricks.WorkspaceClient,
620637

621638
currentState := sshTask.Status.State
622639

623-
// Print status if it changed
640+
// Update spinner if state changed
624641
if currentState != prevState {
625-
cmdio.LogString(ctx, fmt.Sprintf("Task status: %s", currentState))
642+
sp.Update(fmt.Sprintf("Starting SSH server... (task: %s)", currentState))
626643
prevState = currentState
627644
}
628645

629646
// Check if task is running
630647
if currentState == jobs.RunLifecycleStateV2StateRunning {
631-
cmdio.LogString(ctx, "SSH server task is now running, proceeding to connect...")
632648
return sshTask, nil
633649
}
634650

@@ -651,22 +667,23 @@ func ensureSSHServerIsRunning(ctx context.Context, client *databricks.WorkspaceC
651667

652668
serverPort, userName, effectiveClusterID, err := getServerMetadata(ctx, client, sessionID, clusterID, version, opts.Liteswap)
653669
if errors.Is(err, errServerMetadata) {
654-
cmdio.LogString(ctx, "SSH server is not running, starting it now...")
670+
cmdio.LogString(ctx, "Starting SSH server...")
655671

656672
err := submitSSHTunnelJob(ctx, client, version, secretScopeName, opts)
657673
if err != nil {
658674
return "", 0, "", fmt.Errorf("failed to submit and start ssh server job: %w", err)
659675
}
660676

661-
cmdio.LogString(ctx, "Waiting for the ssh server to start...")
677+
sp := cmdio.NewSpinner(ctx)
678+
defer sp.Close()
679+
sp.Update("Waiting for the SSH server to start...")
662680
maxRetries := 30
663681
for retries := range maxRetries {
664682
if ctx.Err() != nil {
665683
return "", 0, "", ctx.Err()
666684
}
667685
serverPort, userName, effectiveClusterID, err = getServerMetadata(ctx, client, sessionID, clusterID, version, opts.Liteswap)
668686
if err == nil {
669-
cmdio.LogString(ctx, "Health check successful, starting ssh WebSocket connection...")
670687
break
671688
} else if retries < maxRetries-1 {
672689
time.Sleep(2 * time.Second)

experimental/ssh/internal/client/releases.go

Lines changed: 6 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -12,8 +12,8 @@ import (
1212
"strings"
1313

1414
"github.com/databricks/cli/experimental/ssh/internal/workspace"
15-
"github.com/databricks/cli/libs/cmdio"
1615
"github.com/databricks/cli/libs/filer"
16+
"github.com/databricks/cli/libs/log"
1717
"github.com/databricks/databricks-sdk-go"
1818
)
1919

@@ -48,7 +48,7 @@ func uploadReleases(ctx context.Context, workspaceFiler filer.Filer, getRelease
4848

4949
_, err := workspaceFiler.Stat(ctx, remoteBinaryPath)
5050
if err == nil {
51-
cmdio.LogString(ctx, fmt.Sprintf("File %s already exists in the workspace, skipping upload", remoteBinaryPath))
51+
log.Infof(ctx, "File %s already exists in the workspace, skipping upload", remoteBinaryPath)
5252
continue
5353
} else if !errors.Is(err, fs.ErrNotExist) {
5454
return fmt.Errorf("failed to check if file %s exists in workspace: %w", remoteBinaryPath, err)
@@ -60,14 +60,14 @@ func uploadReleases(ctx context.Context, workspaceFiler filer.Filer, getRelease
6060
}
6161
defer releaseReader.Close()
6262

63-
cmdio.LogString(ctx, fmt.Sprintf("Uploading %s to the workspace", fileName))
63+
log.Infof(ctx, "Uploading %s to the workspace", fileName)
6464
// workspace-files/import-file API will automatically unzip the payload,
6565
// producing the filerRoot/remoteSubFolder/*archive-contents* structure, with 'databricks' binary inside.
6666
err = workspaceFiler.Write(ctx, remoteArchivePath, releaseReader, filer.OverwriteIfExists, filer.CreateParentDirectories)
6767
if err != nil {
6868
return fmt.Errorf("failed to upload file %s to workspace: %w", remoteArchivePath, err)
6969
}
70-
cmdio.LogString(ctx, fmt.Sprintf("Successfully uploaded %s to workspace", remoteBinaryPath))
70+
log.Infof(ctx, "Successfully uploaded %s to workspace", remoteBinaryPath)
7171
}
7272

7373
return nil
@@ -81,7 +81,7 @@ func getReleaseName(architecture, version string) string {
8181
}
8282

8383
func getLocalRelease(ctx context.Context, architecture, version, releasesDir string) (io.ReadCloser, error) {
84-
cmdio.LogString(ctx, "Looking for CLI releases in directory: "+releasesDir)
84+
log.Infof(ctx, "Looking for CLI releases in directory: %s", releasesDir)
8585
releaseName := getReleaseName(architecture, version)
8686
releasePath := filepath.Join(releasesDir, releaseName)
8787
file, err := os.Open(releasePath)
@@ -95,7 +95,7 @@ func getGithubRelease(ctx context.Context, architecture, version, releasesDir st
9595
// TODO: download and check databricks_cli_<version>_SHA256SUMS
9696
fileName := getReleaseName(architecture, version)
9797
downloadURL := fmt.Sprintf("https://github.com/databricks/cli/releases/download/v%s/%s", version, fileName)
98-
cmdio.LogString(ctx, fmt.Sprintf("Downloading %s from %s", fileName, downloadURL))
98+
log.Infof(ctx, "Downloading %s from %s", fileName, downloadURL)
9999

100100
resp, err := http.Get(downloadURL)
101101
if err != nil {

experimental/ssh/internal/proxy/client.go

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -6,19 +6,19 @@ import (
66
"io"
77
"time"
88

9-
"github.com/databricks/cli/libs/cmdio"
9+
"github.com/databricks/cli/libs/log"
1010
"golang.org/x/sync/errgroup"
1111
)
1212

1313
func RunClientProxy(ctx context.Context, src io.ReadCloser, dst io.Writer, requestHandoverTick func() <-chan time.Time, createConn createWebsocketConnectionFunc) error {
1414
proxy := newProxyConnection(createConn)
15-
cmdio.LogString(ctx, "Establishing SSH proxy connection...")
15+
log.Infof(ctx, "Establishing SSH proxy connection...")
1616
g, gCtx := errgroup.WithContext(ctx)
1717
if err := proxy.connect(gCtx); err != nil {
1818
return fmt.Errorf("failed to connect to proxy: %w", err)
1919
}
2020
defer proxy.close()
21-
cmdio.LogString(ctx, "SSH proxy connection established")
21+
log.Infof(ctx, "SSH proxy connection established")
2222

2323
g.Go(func() error {
2424
for {

experimental/ssh/internal/vscode/run.go

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -8,6 +8,7 @@ import (
88
"strings"
99

1010
"github.com/databricks/cli/libs/cmdio"
11+
"github.com/databricks/cli/libs/log"
1112
"golang.org/x/mod/semver"
1213
)
1314

@@ -148,7 +149,7 @@ func LaunchIDE(ctx context.Context, ideOption, connectionName, userName, databri
148149
remoteURI := fmt.Sprintf("ssh-remote+%s@%s", userName, connectionName)
149150
remotePath := fmt.Sprintf("/Workspace/Users/%s/", databricksUserName)
150151

151-
cmdio.LogString(ctx, fmt.Sprintf("Launching %s with remote URI: %s and path: %s", ideOption, remoteURI, remotePath))
152+
log.Infof(ctx, "Launching %s with remote URI: %s and path: %s", ideOption, remoteURI, remotePath)
152153

153154
ideCmd := exec.CommandContext(ctx, ide.Command, "--remote", remoteURI, remotePath)
154155
ideCmd.Stdout = os.Stdout

experimental/ssh/internal/vscode/settings.go

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -289,11 +289,11 @@ func backupSettings(ctx context.Context, path string) error {
289289
latestBak := path + ".latest.bak"
290290

291291
if _, err := os.Stat(originalBak); os.IsNotExist(err) {
292-
cmdio.LogString(ctx, "Backing up settings to "+filepath.ToSlash(originalBak))
292+
log.Infof(ctx, "Backing up settings to %s", filepath.ToSlash(originalBak))
293293
return os.WriteFile(originalBak, data, 0o600)
294294
}
295295

296-
cmdio.LogString(ctx, "Backing up settings to "+filepath.ToSlash(latestBak))
296+
log.Infof(ctx, "Backing up settings to %s", filepath.ToSlash(latestBak))
297297
return os.WriteFile(latestBak, data, 0o600)
298298
}
299299

0 commit comments

Comments (0)