diff --git a/acceptance/bundle/deploy/secret-scope/databricks.yml.tmpl b/acceptance/bundle/deploy/secret-scope/databricks.yml.tmpl
new file mode 100644
index 0000000000..de8534a4e5
--- /dev/null
+++ b/acceptance/bundle/deploy/secret-scope/databricks.yml.tmpl
@@ -0,0 +1,9 @@
+
+bundle:
+  name: deploy-secret-scope-test-$UNIQUE_NAME
+
+resources:
+  secret_scopes:
+    secret_scope1:
+      name: my-secrets
+      initial_manage_principal: users
diff --git a/acceptance/bundle/deploy/secret-scope/output.txt b/acceptance/bundle/deploy/secret-scope/output.txt
new file mode 100644
index 0000000000..3a165c2a1e
--- /dev/null
+++ b/acceptance/bundle/deploy/secret-scope/output.txt
@@ -0,0 +1,44 @@
+
+>>> [CLI] bundle deploy
+Uploading bundle files to /Workspace/Users/[USERNAME]/.bundle/deploy-secret-scope-test-[UNIQUE_NAME]/default/files...
+Deploying resources...
+Updating deployment state...
+Deployment complete!
+
+>>> [CLI] bundle summary --output json
+{
+  "initial_manage_principal": "users",
+  "modified_status": "created",
+  "name": "my-secrets"
+}
+
+>>> [CLI] secrets list-scopes -o json
+{
+  "backend_type": "DATABRICKS",
+  "name": "my-secrets"
+}
+
+>>> [CLI] secrets list-acls my-secrets
+[
+  {
+    "permission": "MANAGE",
+    "principal": "[USERNAME]"
+  }
+]
+
+>>> [CLI] secrets put-secret my-secrets my-key --string-value my-secret-value
+
+>>> [CLI] secrets get-secret my-secrets my-key
+{
+  "key":"my-key",
+  "value":"bXktc2VjcmV0LXZhbHVl"
+}
+
+>>> [CLI] bundle destroy --auto-approve
+The following resources will be deleted:
+  delete secret_scope secret_scope1
+
+All files and directories at the following location will be deleted: /Workspace/Users/[USERNAME]/.bundle/deploy-secret-scope-test-[UNIQUE_NAME]/default
+
+Deleting files...
+Destroy complete!
diff --git a/acceptance/bundle/deploy/secret-scope/script b/acceptance/bundle/deploy/secret-scope/script
new file mode 100644
index 0000000000..d52c0339e7
--- /dev/null
+++ b/acceptance/bundle/deploy/secret-scope/script
@@ -0,0 +1,15 @@
+envsubst < databricks.yml.tmpl > databricks.yml
+
+cleanup() {
+  trace $CLI bundle destroy --auto-approve
+}
+trap cleanup EXIT
+
+trace $CLI bundle deploy
+trace $CLI bundle summary --output json | jq '.resources.secret_scopes.secret_scope1'
+
+trace $CLI secrets list-scopes -o json | jq '.[] | select(.name == "my-secrets")'
+trace $CLI secrets list-acls my-secrets
+
+trace $CLI secrets put-secret my-secrets my-key --string-value "my-secret-value"
+trace $CLI secrets get-secret my-secrets my-key
diff --git a/acceptance/bundle/deploy/secret-scope/test.toml b/acceptance/bundle/deploy/secret-scope/test.toml
new file mode 100644
index 0000000000..17c3300bf8
--- /dev/null
+++ b/acceptance/bundle/deploy/secret-scope/test.toml
@@ -0,0 +1,50 @@
+Cloud = true
+Local = true
+
+Ignore = [
+  "databricks.yml",
+]
+
+[[Server]]
+Pattern = "POST /api/2.0/secrets/scopes/create"
+
+[[Server]]
+Pattern = "GET /api/2.0/secrets/scopes/list"
+Response.Body = '''
+{
+  "scopes": [
+    {
+      "backend_type": "DATABRICKS",
+      "name": "my-secrets"
+    }
+  ]
+}
+'''
+
+[[Server]]
+Pattern = "POST /api/2.0/secrets/scopes/delete"
+
+[[Server]]
+Pattern = "POST /api/2.0/secrets/put"
+
+[[Server]]
+Pattern = "GET /api/2.0/secrets/get"
+Response.Body = '''
+{
+  "key":"my-key",
+  "value":"bXktc2VjcmV0LXZhbHVl"
+}
+'''
+
+[[Server]]
+Pattern = "GET /api/2.0/secrets/acls/list"
+Response.Body = '''
+{
+  "items": [
+    {
+      "permission": "MANAGE",
+      "principal": "[USERNAME]"
+    }
+  ]
+}
+'''
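A note for readers (illustrative only, not part of this patch): once deployed, the scope behaves like any workspace secret scope, so a hypothetical job in the same bundle could consume the secret through the standard `{{secrets/<scope>/<key>}}` reference, e.g.:

    resources:
      jobs:
        read_secret_job:  # hypothetical job, shown only to illustrate consumption
          name: read-secret
          tasks:
            - task_key: main
              spark_python_task:
                python_file: ./main.py
              new_cluster:
                spark_version: 15.4.x-scala2.12
                num_workers: 1
                spark_env_vars:
                  MY_SECRET: "{{secrets/my-secrets/my-key}}"  # resolved at runtime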
diff --git a/acceptance/bundle/deployment/bind/secret-scope/databricks.yml.tmpl b/acceptance/bundle/deployment/bind/secret-scope/databricks.yml.tmpl
new file mode 100644
index 0000000000..1377bb4923
--- /dev/null
+++ b/acceptance/bundle/deployment/bind/secret-scope/databricks.yml.tmpl
@@ -0,0 +1,7 @@
+bundle:
+  name: bind-secret-scope-test-$UNIQUE_NAME
+
+resources:
+  secret_scopes:
+    secret_scope1:
+      name: $SECRET_SCOPE_NAME
diff --git a/acceptance/bundle/deployment/bind/secret-scope/output.txt b/acceptance/bundle/deployment/bind/secret-scope/output.txt
new file mode 100644
index 0000000000..d8e3368bda
--- /dev/null
+++ b/acceptance/bundle/deployment/bind/secret-scope/output.txt
@@ -0,0 +1,35 @@
+
+>>> [CLI] secrets create-scope test-secret-scope-[UUID]
+
+>>> [CLI] bundle deployment bind secret_scope1 test-secret-scope-[UUID] --auto-approve
+Updating deployment state...
+Successfully bound secret_scope with an id 'test-secret-scope-[UUID]'. Run 'bundle deploy' to deploy changes to your workspace
+
+>>> [CLI] bundle deploy
+Uploading bundle files to /Workspace/Users/[USERNAME]/.bundle/bind-secret-scope-test-[UNIQUE_NAME]/default/files...
+Deploying resources...
+Updating deployment state...
+Deployment complete!
+
+>>> [CLI] secrets list-scopes -o json
+{
+  "backend_type": "DATABRICKS",
+  "name": "test-secret-scope-[UUID]"
+}
+
+>>> [CLI] bundle deployment unbind secret_scope1
+Updating deployment state...
+
+>>> [CLI] bundle destroy --auto-approve
+All files and directories at the following location will be deleted: /Workspace/Users/[USERNAME]/.bundle/bind-secret-scope-test-[UNIQUE_NAME]/default
+
+Deleting files...
+Destroy complete!
+
+>>> [CLI] secrets list-scopes -o json
+{
+  "backend_type": "DATABRICKS",
+  "name": "test-secret-scope-[UUID]"
+}
+
+>>> [CLI] secrets delete-scope test-secret-scope-[UUID]
diff --git a/acceptance/bundle/deployment/bind/secret-scope/script b/acceptance/bundle/deployment/bind/secret-scope/script
new file mode 100644
index 0000000000..1dd2233851
--- /dev/null
+++ b/acceptance/bundle/deployment/bind/secret-scope/script
@@ -0,0 +1,26 @@
+SECRET_SCOPE_NAME="test-secret-scope-$(uuid)"
+if [ -z "$CLOUD_ENV" ]; then
+  SECRET_SCOPE_NAME="test-secret-scope-6260d50f-e8ff-4905-8f28-812345678903" # use hard-coded uuid when running locally
+fi
+export SECRET_SCOPE_NAME
+envsubst < databricks.yml.tmpl > databricks.yml
+
+# Create a pre-defined secret scope:
+trace $CLI secrets create-scope "${SECRET_SCOPE_NAME}"
+
+cleanup() {
+  trace $CLI secrets delete-scope "${SECRET_SCOPE_NAME}"
+}
+trap cleanup EXIT
+
+trace $CLI bundle deployment bind secret_scope1 "${SECRET_SCOPE_NAME}" --auto-approve
+
+trace $CLI bundle deploy
+
+trace $CLI secrets list-scopes -o json | jq --arg value "${SECRET_SCOPE_NAME}" '.[] | select(.name == $value)'
+
+trace $CLI bundle deployment unbind secret_scope1
+
+trace $CLI bundle destroy --auto-approve
+
+trace $CLI secrets list-scopes -o json | jq --arg value "${SECRET_SCOPE_NAME}" '.[] | select(.name == $value)'
diff --git a/acceptance/bundle/deployment/bind/secret-scope/test.toml b/acceptance/bundle/deployment/bind/secret-scope/test.toml
new file mode 100644
index 0000000000..3c38ba78fc
--- /dev/null
+++ b/acceptance/bundle/deployment/bind/secret-scope/test.toml
@@ -0,0 +1,26 @@
+Local = true
+Cloud = true
+RequiresUnityCatalog = true
+
+Ignore = [
+  "databricks.yml",
+]
+
+[[Server]]
+Pattern = "POST /api/2.0/secrets/scopes/create"
+
+[[Server]]
+Pattern = "GET /api/2.0/secrets/scopes/list"
+Response.Body = '''
+{
+  "scopes": [
+    {
+      "backend_type": "DATABRICKS",
+      "name": "test-secret-scope-6260d50f-e8ff-4905-8f28-812345678903"
+    }
+  ]
+}
+'''
+
+[[Server]]
+Pattern = "POST /api/2.0/secrets/scopes/delete"
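For orientation (illustrative, not part of this patch): the mutator change below maps bundle permission levels onto secret ACLs, so a hypothetical top-level grant such as

    permissions:
      - level: CAN_MANAGE
        user_name: admin@example.com
      - level: CAN_VIEW
        group_name: data-readers

would surface as a MANAGE ACL and a READ ACL on every secret scope defined in the bundle.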
diff --git a/bundle/config/mutator/resourcemutator/apply_bundle_permissions.go b/bundle/config/mutator/resourcemutator/apply_bundle_permissions.go
index 58fbe50eb3..16bb874c10 100644
--- a/bundle/config/mutator/resourcemutator/apply_bundle_permissions.go
+++ b/bundle/config/mutator/resourcemutator/apply_bundle_permissions.go
@@ -51,6 +51,10 @@ var (
 			permissions.CAN_MANAGE: "CAN_MANAGE",
 			permissions.CAN_VIEW:   "CAN_USE",
 		},
+		"secret_scopes": {
+			permissions.CAN_MANAGE: "MANAGE",
+			permissions.CAN_VIEW:   "READ",
+		},
 	}
 )
diff --git a/bundle/config/mutator/resourcemutator/apply_target_mode_test.go b/bundle/config/mutator/resourcemutator/apply_target_mode_test.go
index cbc5e2900b..fa55196645 100644
--- a/bundle/config/mutator/resourcemutator/apply_target_mode_test.go
+++ b/bundle/config/mutator/resourcemutator/apply_target_mode_test.go
@@ -6,6 +6,8 @@ import (
 	"slices"
 	"testing"
 
+	"github.com/databricks/databricks-sdk-go/service/workspace"
+
 	"github.com/databricks/cli/bundle"
 	"github.com/databricks/cli/bundle/config"
 	"github.com/databricks/cli/bundle/config/resources"
@@ -152,6 +154,13 @@ func mockBundle(mode config.Mode) *bundle.Bundle {
 					},
 				},
 			},
+			SecretScopes: map[string]*resources.SecretScope{
+				"secretScope1": {
+					SecretScope: &workspace.SecretScope{
+						Name: "secretScope1",
+					},
+				},
+			},
 		},
 	},
 	SyncRoot: vfs.MustNew("/Users/lennart.kats@databricks.com"),
@@ -318,8 +327,8 @@ func TestAllNonUcResourcesAreRenamed(t *testing.T) {
 		nameField := resource.Elem().FieldByName("Name")
 		resourceType := resources.Type().Field(i).Name
 
-		// Skip apps, as they are not renamed
-		if resourceType == "Apps" {
+		// Skip resources that are not renamed
+		if resourceType == "Apps" || resourceType == "SecretScopes" {
 			continue
 		}
diff --git a/bundle/config/mutator/resourcemutator/run_as.go b/bundle/config/mutator/resourcemutator/run_as.go
index fb5408bfbd..dc553a601c 100644
--- a/bundle/config/mutator/resourcemutator/run_as.go
+++ b/bundle/config/mutator/resourcemutator/run_as.go
@@ -129,6 +129,17 @@ func validateRunAs(b *bundle.Bundle) diag.Diagnostics {
 		))
 	}
 
+	// Secret Scopes do not support run_as in the API.
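+	// Reuse the shared reportRunAsNotSupported helper so the diagnostic carries the config location of the offending block.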
+	if len(b.Config.Resources.SecretScopes) > 0 {
+		diags = diags.Extend(reportRunAsNotSupported(
+			"secret_scopes",
+			b.Config.GetLocation("resources.secret_scopes"),
+			b.Config.Workspace.CurrentUser.UserName,
+			identity,
+		))
+	}
+
 	return diags
 }
diff --git a/bundle/config/mutator/resourcemutator/run_as_test.go b/bundle/config/mutator/resourcemutator/run_as_test.go
index 664b955124..e4a419e035 100644
--- a/bundle/config/mutator/resourcemutator/run_as_test.go
+++ b/bundle/config/mutator/resourcemutator/run_as_test.go
@@ -43,6 +43,7 @@ func allResourceTypes(t *testing.T) []string {
 			"quality_monitors",
 			"registered_models",
 			"schemas",
+			"secret_scopes",
 			"volumes",
 		},
 		resourceTypes,
diff --git a/bundle/config/resources.go b/bundle/config/resources.go
index cffd5ada87..115691b814 100644
--- a/bundle/config/resources.go
+++ b/bundle/config/resources.go
@@ -24,6 +24,7 @@ type Resources struct {
 	Clusters     map[string]*resources.Cluster     `json:"clusters,omitempty"`
 	Dashboards   map[string]*resources.Dashboard   `json:"dashboards,omitempty"`
 	Apps         map[string]*resources.App         `json:"apps,omitempty"`
+	SecretScopes map[string]*resources.SecretScope `json:"secret_scopes,omitempty"`
 }
 
 type ConfigResource interface {
@@ -92,6 +93,7 @@ func (r *Resources) AllResources() []ResourceGroup {
 		collectResourceMap(descriptions["dashboards"], r.Dashboards),
 		collectResourceMap(descriptions["volumes"], r.Volumes),
 		collectResourceMap(descriptions["apps"], r.Apps),
+		collectResourceMap(descriptions["secret_scopes"], r.SecretScopes),
 	}
 }
 
@@ -163,6 +165,12 @@ func (r *Resources) FindResourceByConfigKey(key string) (ConfigResource, error)
 		}
 	}
 
+	for k := range r.SecretScopes {
+		if k == key {
+			found = append(found, r.SecretScopes[k])
+		}
+	}
+
 	if len(found) == 0 {
 		return nil, fmt.Errorf("no such resource: %s", key)
 	}
@@ -193,5 +201,6 @@ func SupportedResources() map[string]resources.ResourceDescription {
 		"dashboards":    (&resources.Dashboard{}).ResourceDescription(),
 		"volumes":       (&resources.Volume{}).ResourceDescription(),
 		"apps":          (&resources.App{}).ResourceDescription(),
+		"secret_scopes": (&resources.SecretScope{}).ResourceDescription(),
 	}
 }
diff --git a/bundle/config/resources/secret_scope.go b/bundle/config/resources/secret_scope.go
new file mode 100644
index 0000000000..7d9aaa6ca9
--- /dev/null
+++ b/bundle/config/resources/secret_scope.go
@@ -0,0 +1,75 @@
+package resources
+
+import (
+	"context"
+	"net/url"
+
+	"github.com/databricks/databricks-sdk-go"
+	"github.com/databricks/databricks-sdk-go/marshal"
+	"github.com/databricks/databricks-sdk-go/service/workspace"
+)
+
+type SecretScope struct {
+	Name                   string `json:"name"`
+	InitialManagePrincipal string `json:"initial_manage_principal"`
+
+	ModifiedStatus ModifiedStatus `json:"modified_status,omitempty" bundle:"internal"`
+
+	*workspace.SecretScope
+}
+
+func (s *SecretScope) UnmarshalJSON(b []byte) error {
+	return marshal.Unmarshal(b, s)
+}
+
+func (s SecretScope) MarshalJSON() ([]byte, error) {
+	return marshal.Marshal(s)
+}
+
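+// Exists reports whether a scope with the given name is present in the workspace.
+// The Secrets API exposes no direct scope lookup, so the full scope list is scanned.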
+func (s SecretScope) Exists(ctx context.Context, w *databricks.WorkspaceClient, name string) (bool, error) {
+	scopes, err := w.Secrets.ListScopesAll(ctx)
+	if err != nil {
+		return false, err
+	}
+
+	for _, scope := range scopes {
+		if scope.Name == name {
+			return true, nil
+		}
+	}
+
+	return false, nil
+}
+
+func (s SecretScope) ResourceDescription() ResourceDescription {
+	return ResourceDescription{
+		SingularName:          "secret_scope",
+		PluralName:            "secret_scopes",
+		SingularTitle:         "Secret Scope",
+		PluralTitle:           "Secret Scopes",
+		TerraformResourceName: "databricks_secret_scope",
+	}
+}
+
+func (s SecretScope) TerraformResourceName() string {
+	return "databricks_secret_scope"
+}
+
+func (s SecretScope) GetName() string {
+	return s.Name
+}
+
+func (s SecretScope) GetURL() string {
+	// Secret scopes do not have a URL
+	return ""
+}
+
+func (s SecretScope) InitializeURL(_ url.URL) {
+	// Secret scopes do not have a URL
+}
+
+func (s SecretScope) IsNil() bool {
+	return s.SecretScope == nil
+}
diff --git a/bundle/config/resources_test.go b/bundle/config/resources_test.go
index 7424966fe7..783024514d 100644
--- a/bundle/config/resources_test.go
+++ b/bundle/config/resources_test.go
@@ -7,6 +7,8 @@ import (
 	"strings"
 	"testing"
 
+	"github.com/databricks/databricks-sdk-go/service/workspace"
+
 	"github.com/databricks/databricks-sdk-go/service/serving"
 
 	"github.com/databricks/cli/bundle/config/resources"
@@ -171,6 +173,12 @@ func TestResourcesBindSupport(t *testing.T) {
 				CreateServingEndpoint: &serving.CreateServingEndpoint{},
 			},
 		},
+		SecretScopes: map[string]*resources.SecretScope{
+			"my_secret_scope": {
+				Name:        "0",
+				SecretScope: &workspace.SecretScope{},
+			},
+		},
 	}
 
 	unbindableResources := map[string]bool{"model": true}
@@ -187,6 +195,9 @@ func TestResourcesBindSupport(t *testing.T) {
 	m.GetMockAppsAPI().EXPECT().GetByName(mock.Anything, mock.Anything).Return(nil, nil)
 	m.GetMockQualityMonitorsAPI().EXPECT().Get(mock.Anything, mock.Anything).Return(nil, nil)
 	m.GetMockServingEndpointsAPI().EXPECT().Get(mock.Anything, mock.Anything).Return(nil, nil)
+	m.GetMockSecretsAPI().EXPECT().ListScopesAll(mock.Anything).Return([]workspace.SecretScope{
+		{Name: "0"},
+	}, nil)
 
 	allResources := supportedResources.AllResources()
 	for _, group := range allResources {
diff --git a/bundle/deploy/terraform/convert.go b/bundle/deploy/terraform/convert.go
index d549b97973..e20259dfe8 100644
--- a/bundle/deploy/terraform/convert.go
+++ b/bundle/deploy/terraform/convert.go
@@ -4,6 +4,8 @@ import (
 	"context"
 	"fmt"
 
+	"github.com/databricks/databricks-sdk-go/service/workspace"
+
 	"github.com/databricks/cli/bundle/config"
 	"github.com/databricks/cli/bundle/config/resources"
 	"github.com/databricks/cli/bundle/deploy/terraform/tfdyn"
@@ -211,6 +213,16 @@ func TerraformToBundle(state *resourcesState, config *config.Root) error {
 			}
 			cur.Name = instance.Attributes.Name
 			config.Resources.Apps[resource.Name] = cur
+		case "databricks_secret_scope":
+			if config.Resources.SecretScopes == nil {
+				config.Resources.SecretScopes = make(map[string]*resources.SecretScope)
+			}
+			cur := config.Resources.SecretScopes[resource.Name]
+			if cur == nil {
+				cur = &resources.SecretScope{ModifiedStatus: resources.ModifiedStatusDeleted, SecretScope: &workspace.SecretScope{}}
+			}
+			cur.Name = instance.Attributes.Name
+			config.Resources.SecretScopes[resource.Name] = cur
 		case "databricks_permissions":
 		case "databricks_grants":
 			// Ignore; no need to pull these back into the configuration.
@@ -280,6 +292,11 @@ func TerraformToBundle(state *resourcesState, config *config.Root) error {
 			src.ModifiedStatus = resources.ModifiedStatusCreated
 		}
 	}
+	for _, src := range config.Resources.SecretScopes {
+		if src.ModifiedStatus == "" {
+			src.ModifiedStatus = resources.ModifiedStatusCreated
+		}
+	}
 
 	return nil
 }
diff --git a/bundle/deploy/terraform/convert_test.go b/bundle/deploy/terraform/convert_test.go
index bbad15acb3..b62ab5f531 100644
--- a/bundle/deploy/terraform/convert_test.go
+++ b/bundle/deploy/terraform/convert_test.go
@@ -5,6 +5,8 @@ import (
 	"reflect"
 	"testing"
 
+	"github.com/databricks/databricks-sdk-go/service/workspace"
+
 	"github.com/databricks/cli/bundle/config"
 	"github.com/databricks/cli/bundle/config/resources"
 	"github.com/databricks/cli/bundle/internal/tf/schema"
@@ -703,6 +705,14 @@ func TestTerraformToBundleEmptyLocalResources(t *testing.T) {
 					{Attributes: stateInstanceAttributes{Name: "app1"}},
 				},
 			},
+			{
+				Type: "databricks_secret_scope",
+				Mode: "managed",
+				Name: "test_secret_scope",
+				Instances: []stateResourceInstance{
+					{Attributes: stateInstanceAttributes{Name: "secret_scope1"}},
+				},
+			},
 		},
 	}
 	err := TerraformToBundle(&tfState, &config)
@@ -834,6 +844,13 @@ func TestTerraformToBundleEmptyRemoteResources(t *testing.T) {
 				},
 			},
 		},
+		SecretScopes: map[string]*resources.SecretScope{
+			"test_secret_scope": {
+				SecretScope: &workspace.SecretScope{
+					Name: "test_secret_scope",
+				},
+			},
+		},
 	},
 	}
 	tfState := resourcesState{
@@ -1028,6 +1045,18 @@ func TestTerraformToBundleModifiedResources(t *testing.T) {
 				},
 			},
 		},
+		SecretScopes: map[string]*resources.SecretScope{
+			"test_secret_scope": {
+				SecretScope: &workspace.SecretScope{
+					Name: "test_secret_scope",
+				},
+			},
+			"test_secret_scope_new": {
+				SecretScope: &workspace.SecretScope{
+					Name: "test_secret_scope_new",
+				},
+			},
+		},
 	},
 	}
 	tfState := resourcesState{
diff --git a/bundle/deploy/terraform/tfdyn/convert_secret_scope.go b/bundle/deploy/terraform/tfdyn/convert_secret_scope.go
new file mode 100644
index 0000000000..9dcd970309
--- /dev/null
+++ b/bundle/deploy/terraform/tfdyn/convert_secret_scope.go
@@ -0,0 +1,30 @@
+package tfdyn
+
+import (
+	"context"
+
+	"github.com/databricks/cli/bundle/internal/tf/schema"
+	"github.com/databricks/cli/libs/dyn"
+	"github.com/databricks/cli/libs/dyn/convert"
+	"github.com/databricks/cli/libs/log"
+	"github.com/databricks/databricks-sdk-go/service/workspace"
+)
+
+type secretScopeConverter struct{}
+
+func (s secretScopeConverter) Convert(ctx context.Context, key string, vin dyn.Value, out *schema.Resources) error {
+	// Normalize the output value to the target schema.
+	vout, diags := convert.Normalize(workspace.SecretScope{}, vin)
+	for _, diag := range diags {
+		log.Debugf(ctx, "secret scope normalization diagnostic: %s", diag.Summary)
+	}
+
+	// Add the converted resource to the output.
+	out.SecretScope[key] = vout.AsAny()
+
+	return nil
+}
+
+func init() {
+	registerConverter("secret_scopes", secretScopeConverter{})
+}
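For reference (a sketch of expected output, not asserted anywhere in this patch): given the `my-secrets` scope from the acceptance test above, the converter should emit Terraform JSON along these lines:

    {
      "resource": {
        "databricks_secret_scope": {
          "secret_scope1": {
            "name": "my-secrets",
            "initial_manage_principal": "users"
          }
        }
      }
    }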
diff --git a/bundle/docsgen/output/reference.md b/bundle/docsgen/output/reference.md
index c51c5c190a..9476587d1a 100644
--- a/bundle/docsgen/output/reference.md
+++ b/bundle/docsgen/output/reference.md
@@ -1,7 +1,7 @@
 ---
 description: 'Configuration reference for databricks.yml'
 last_update:
-  date: 2025-04-01
+  date: 2025-04-23
 ---
 
 
@@ -438,6 +438,10 @@ resources:
   - Map
   - The schema definitions for the bundle, where each key is the name of the schema. See [\_](/dev-tools/bundles/resources.md#schemas).
 
+- - `secret_scopes`
+  - Map
+  -
+
 - - `volumes`
   - Map
   - The volume definitions for the bundle, where each key is the name of the volume. See [\_](/dev-tools/bundles/resources.md#volumes).
@@ -921,6 +925,10 @@ The resource definitions for the target.
   - Map
   - The schema definitions for the bundle, where each key is the name of the schema. See [\_](/dev-tools/bundles/resources.md#schemas).
 
+- - `secret_scopes`
+  - Map
+  -
+
 - - `volumes`
   - Map
   - The volume definitions for the bundle, where each key is the name of the volume. See [\_](/dev-tools/bundles/resources.md#volumes).
diff --git a/bundle/docsgen/output/resources.md b/bundle/docsgen/output/resources.md
index b8f7ec5dd8..3738ca85a3 100644
--- a/bundle/docsgen/output/resources.md
+++ b/bundle/docsgen/output/resources.md
@@ -1,7 +1,7 @@
 ---
 description: 'Learn about resources supported by Databricks Asset Bundles and how to configure them.'
 last_update:
-  date: 2025-04-01
+  date: 2025-04-23
 ---
 
 
@@ -508,19 +508,19 @@ for deployment to the app compute.
 
 - - `group_name`
   - String
-  - The name of the group that has the permission set in level.
+  -
 
 - - `level`
   - String
-  - The allowed permission for user, group, service principal defined for this permission.
+  -
 
 - - `service_principal_name`
   - String
-  - The name of the service principal that has the permission set in level.
+  -
 
 - - `user_name`
   - String
-  - The name of the user that has the permission set in level.
+  -
 
 :::
@@ -1446,19 +1446,19 @@ destination needs to be provided, e.g.
 
 - - `group_name`
   - String
-  - The name of the group that has the permission set in level.
+  -
 
 - - `level`
   - String
-  - The allowed permission for user, group, service principal defined for this permission.
+  -
 
 - - `service_principal_name`
   - String
-  - The name of the service principal that has the permission set in level.
+  -
 
 - - `user_name`
   - String
-  - The name of the user that has the permission set in level.
+  -
 
 :::
@@ -1615,19 +1615,19 @@ In addition, if you attempt to deploy a bundle that contains a dashboard JSON fi
 
 - - `group_name`
   - String
-  - The name of the group that has the permission set in level.
+  -
 
 - - `level`
   - String
-  - The allowed permission for user, group, service principal defined for this permission.
+  -
 
 - - `service_principal_name`
   - String
-  - The name of the service principal that has the permission set in level.
+  -
 
 - - `user_name`
   - String
-  - The name of the user that has the permission set in level.
+  -
 
 :::
@@ -1717,19 +1717,19 @@ resources:
 
 - - `group_name`
   - String
-  - The name of the group that has the permission set in level.
+  -
 
 - - `level`
   - String
-  - The allowed permission for user, group, service principal defined for this permission.
+  -
 
 - - `service_principal_name`
   - String
-  - The name of the service principal that has the permission set in level.
+  -
 
 - - `user_name`
   - String
-  - The name of the user that has the permission set in level.
+  -
 
 :::
@@ -1820,7 +1820,7 @@ jobs:
 
 - - `job_clusters`
   - Sequence
-  - A list of job cluster specifications that can be shared and reused by tasks of this job. Libraries cannot be declared in a shared job cluster. You must declare dependent libraries in task settings. If more than 100 job clusters are available, you can paginate through them using :method:jobs/get. See [\_](#jobsnamejob_clusters).
+  - A list of job cluster specifications that can be shared and reused by tasks of this job. Libraries cannot be declared in a shared job cluster. You must declare dependent libraries in task settings. See [\_](#jobsnamejob_clusters).
 
 - - `max_concurrent_runs`
   - Integer
@@ -1840,7 +1840,7 @@
 
 - - `performance_target`
   - String
-  - PerformanceTarget defines how performant or cost efficient the execution of run on serverless should be.
+  - The performance mode on a serverless job. The performance target determines the level of compute performance or cost-efficiency for the run. * `STANDARD`: Enables cost-efficient execution of serverless workloads. * `PERFORMANCE_OPTIMIZED`: Prioritizes fast startup and execution times through rapid scaling and optimized cluster performance.
 
 - - `permissions`
   - Sequence
@@ -1864,7 +1864,7 @@
 
 - - `tasks`
   - Sequence
-  - A list of task specifications to be executed by this job. If more than 100 tasks are available, you can paginate through them using :method:jobs/get. Use the `next_page_token` field at the object root to determine if more results are available. See [\_](#jobsnametasks).
+  - A list of task specifications to be executed by this job. It supports up to 1000 elements in write endpoints (:method:jobs/create, :method:jobs/reset, :method:jobs/update, :method:jobs/submit). Read endpoints return only 100 tasks. If more than 100 tasks are available, you can paginate through them using :method:jobs/get. Use the `next_page_token` field at the object root to determine if more results are available. See [\_](#jobsnametasks).
 
 - - `timeout_seconds`
   - Integer
@@ -2036,6 +2036,10 @@ In this minimal environment spec, only pip dependencies are supported.
   - Sequence
   - List of pip dependencies, as supported by the version of pip in this environment.
 
+- - `jar_dependencies`
+  - Sequence
+  - List of jar dependencies, should be string representing volume paths. For example: `/Volumes/path/to/test.jar`.
+
 :::
 
 
@@ -2193,7 +2197,6 @@ An optional set of health rules that can be defined for this job.
 **`Type: Sequence`**
 
 A list of job cluster specifications that can be shared and reused by tasks of this job. Libraries cannot be declared in a shared job cluster. You must declare dependent libraries in task settings.
-If more than 100 job clusters are available, you can paginate through them using :method:jobs/get.
 
 
 
@@ -3049,19 +3052,19 @@ Job-level parameter definitions
 
 - - `group_name`
   - String
-  - The name of the group that has the permission set in level.
+  -
 
 - - `level`
   - String
-  - The allowed permission for user, group, service principal defined for this permission.
+  -
 
 - - `service_principal_name`
   - String
-  - The name of the service principal that has the permission set in level.
+  -
 
 - - `user_name`
   - String
-  - The name of the user that has the permission set in level.
+  -
 
 :::
@@ -3148,7 +3151,8 @@ An optional periodic schedule for this job. The default behavior is that the job
 **`Type: Sequence`**
 
 A list of task specifications to be executed by this job.
-If more than 100 tasks are available, you can paginate through them using :method:jobs/get. Use the `next_page_token` field at the object root to determine if more results are available.
+It supports up to 1000 elements in write endpoints (:method:jobs/create, :method:jobs/reset, :method:jobs/update, :method:jobs/submit).
+Read endpoints return only 100 tasks. If more than 100 tasks are available, you can paginate through them using :method:jobs/get. Use the `next_page_token` field at the object root to determine if more results are available.
 
 
 
@@ -3166,6 +3170,10 @@ If more than 100 tasks are available, you can paginate through them using :metho
   - Map
   - The task evaluates a condition that can be used to control the execution of other tasks when the `condition_task` field is present. The condition task does not require a cluster to execute and does not support retries or notifications. See [\_](#jobsnametaskscondition_task).
 
+- - `dashboard_task`
+  - Map
+  - The task runs a DashboardTask when the `dashboard_task` field is present. See [\_](#jobsnametasksdashboard_task).
+
 - - `dbt_task`
   - Map
   - The task runs one or more dbt commands when the `dbt_task` field is present. The dbt task requires both Databricks SQL and the ability to use a serverless or a pro SQL warehouse. See [\_](#jobsnametasksdbt_task).
@@ -3200,7 +3208,7 @@ If more than 100 tasks are available, you can paginate through them using :metho
 
 - - `gen_ai_compute_task`
   - Map
-  - Next field: 9. See [\_](#jobsnametasksgen_ai_compute_task).
+  - See [\_](#jobsnametasksgen_ai_compute_task).
 
 - - `health`
   - Map
@@ -3238,6 +3246,10 @@ If more than 100 tasks are available, you can paginate through them using :metho
   - Map
   - The task triggers a pipeline update when the `pipeline_task` field is present. Only pipelines configured to use triggered more are supported. See [\_](#jobsnametaskspipeline_task).
 
+- - `power_bi_task`
+  - Map
+  - The task triggers a Power BI semantic model update when the `power_bi_task` field is present. See [\_](#jobsnametaskspower_bi_task).
+
 - - `python_wheel_task`
   - Map
   - The task runs a Python wheel when the `python_wheel_task` field is present. See [\_](#jobsnametaskspython_wheel_task).
@@ -3349,6 +3361,89 @@ The condition task does not require a cluster to execute and does not support re
 
 :::
 
+### jobs._name_.tasks.dashboard_task
+
+**`Type: Map`**
+
+The task runs a DashboardTask when the `dashboard_task` field is present.
+
+
+
+:::list-table
+
+- - Key
+  - Type
+  - Description
+
+- - `dashboard_id`
+  - String
+  -
+
+- - `subscription`
+  - Map
+  - See [\_](#jobsnametasksdashboard_tasksubscription).
+
+- - `warehouse_id`
+  - String
+  - The warehouse id to execute the dashboard with for the schedule
+
+:::
+
+
+### jobs._name_.tasks.dashboard_task.subscription
+
+**`Type: Map`**
+
+
+
+
+:::list-table
+
+- - Key
+  - Type
+  - Description
+
+- - `custom_subject`
+  - String
+  - Optional: Allows users to specify a custom subject line on the email sent to subscribers.
+
+- - `paused`
+  - Boolean
+  - When true, the subscription will not send emails.
+
+- - `subscribers`
+  - Sequence
+  - See [\_](#jobsnametasksdashboard_tasksubscriptionsubscribers).
+
+:::
+
+
+### jobs._name_.tasks.dashboard_task.subscription.subscribers
+
+**`Type: Sequence`**
+
+
+
+
+:::list-table
+
+- - Key
+  - Type
+  - Description
+
+- - `destination_id`
+  - String
+  -
+
+- - `user_name`
+  - String
+  -
+
+:::
+
+
 ### jobs._name_.tasks.dbt_task
 
 **`Type: Map`**
@@ -3494,7 +3589,7 @@ The task executes a nested task for every input provided when the `for_each_task
 
 **`Type: Map`**
 
-Next field: 9
+
 
 
 
@@ -3510,7 +3605,7 @@
 
 - - `compute`
   - Map
-  - Next field: 4. See [\_](#jobsnametasksgen_ai_compute_taskcompute).
+  - See [\_](#jobsnametasksgen_ai_compute_taskcompute).
 
 - - `dl_runtime_image`
   - String
@@ -3543,7 +3638,7 @@
 
 **`Type: Map`**
 
-Next field: 4
+
 
 
 
@@ -4601,6 +4696,113 @@ The task triggers a pipeline update when the `pipeline_task` field is present. O
 
 :::
 
+### jobs._name_.tasks.power_bi_task
+
+**`Type: Map`**
+
+The task triggers a Power BI semantic model update when the `power_bi_task` field is present.
+
+
+
+:::list-table
+
+- - Key
+  - Type
+  - Description
+
+- - `connection_resource_name`
+  - String
+  - The resource name of the UC connection to authenticate from Databricks to Power BI
+
+- - `power_bi_model`
+  - Map
+  - The semantic model to update. See [\_](#jobsnametaskspower_bi_taskpower_bi_model).
+
+- - `refresh_after_update`
+  - Boolean
+  - Whether the model should be refreshed after the update
+
+- - `tables`
+  - Sequence
+  - The tables to be exported to Power BI. See [\_](#jobsnametaskspower_bi_tasktables).
+
+- - `warehouse_id`
+  - String
+  - The SQL warehouse ID to use as the Power BI data source
+
+:::
+
+
+### jobs._name_.tasks.power_bi_task.power_bi_model
+
+**`Type: Map`**
+
+The semantic model to update
+
+
+
+:::list-table
+
+- - Key
+  - Type
+  - Description
+
+- - `authentication_method`
+  - String
+  - How the published Power BI model authenticates to Databricks
+
+- - `model_name`
+  - String
+  - The name of the Power BI model
+
+- - `overwrite_existing`
+  - Boolean
+  - Whether to overwrite existing Power BI models
+
+- - `storage_mode`
+  - String
+  - The default storage mode of the Power BI model
+
+- - `workspace_name`
+  - String
+  - The name of the Power BI workspace of the model
+
+:::
+
+
+### jobs._name_.tasks.power_bi_task.tables
+
+**`Type: Sequence`**
+
+The tables to be exported to Power BI
+
+
+
+:::list-table
+
+- - Key
+  - Type
+  - Description
+
+- - `catalog`
+  - String
+  - The catalog name in Databricks
+
+- - `name`
+  - String
+  - The table name in Databricks
+
+- - `schema`
+  - String
+  - The schema name in Databricks
+
+- - `storage_mode`
+  - String
+  - The Power BI storage mode of the table
+
+:::
+
+
 ### jobs._name_.tasks.python_wheel_task
 
 **`Type: Map`**
@@ -5470,7 +5672,7 @@ model_serving_endpoints:
 
 - - `ai_gateway`
   - Map
-  - The AI Gateway configuration for the serving endpoint. NOTE: Only external model and provisioned throughput endpoints are currently supported. See [\_](#model_serving_endpointsnameai_gateway).
+  - The AI Gateway configuration for the serving endpoint. NOTE: External model, provisioned throughput, and pay-per-token endpoints are fully supported; agent endpoints currently only support inference tables. See [\_](#model_serving_endpointsnameai_gateway).
 
 - - `budget_policy_id`
   - String
@@ -5531,7 +5733,7 @@ resources:
 
 **`Type: Map`**
 
-The AI Gateway configuration for the serving endpoint. NOTE: Only external model and provisioned throughput endpoints are currently supported.
+The AI Gateway configuration for the serving endpoint. NOTE: External model, provisioned throughput, and pay-per-token endpoints are fully supported; agent endpoints currently only support inference tables.
 
 
 
@@ -6475,19 +6677,19 @@ The list of routes that define traffic to each served entity.
 
 - - `group_name`
   - String
-  - The name of the group that has the permission set in level.
+  -
 
 - - `level`
   - String
-  - The allowed permission for user, group, service principal defined for this permission.
+  -
 
 - - `service_principal_name`
   - String
-  - The name of the service principal that has the permission set in level.
+  -
 
 - - `user_name`
   - String
-  - The name of the user that has the permission set in level.
+  -
 
 :::
@@ -6711,19 +6913,19 @@ Tags: Additional metadata key-value pairs for this `model_version`.
 
 - - `group_name`
   - String
-  - The name of the group that has the permission set in level.
+  -
 
 - - `level`
   - String
-  - The allowed permission for user, group, service principal defined for this permission.
+  -
 
 - - `service_principal_name`
   - String
-  - The name of the service principal that has the permission set in level.
+  -
 
 - - `user_name`
   - String
-  - The name of the user that has the permission set in level.
+  -
 
 :::
@@ -6882,7 +7084,7 @@ pipelines:
 
 - - `trigger`
   - Map
-  - Which pipeline trigger to use. Deprecated: Use `continuous` instead. See [\_](#pipelinesnametrigger).
+  - Use continuous instead
 
 :::
@@ -8002,7 +8204,7 @@ Libraries or code needed by this deployment.
 
 - - `whl`
   - String
-  - URI of the whl to be installed.
+  - This field is deprecated
 
 :::
@@ -8122,19 +8324,19 @@ List of notification settings for this pipeline.
 
 - - `group_name`
   - String
-  - The name of the group that has the permission set in level.
+  -
 
 - - `level`
   - String
-  - The allowed permission for user, group, service principal defined for this permission.
+  -
 
 - - `service_principal_name`
   - String
-  - The name of the service principal that has the permission set in level.
+  -
 
 - - `user_name`
   - String
-  - The name of the user that has the permission set in level.
+  -
 
 :::
@@ -8203,63 +8405,6 @@ Only `user_name` or `service_principal_name` can be specified. If both are speci
 
 :::
 
-### pipelines._name_.trigger
-
-**`Type: Map`**
-
-Which pipeline trigger to use. Deprecated: Use `continuous` instead.
-
-
-
-:::list-table
-
-- - Key
-  - Type
-  - Description
-
-- - `cron`
-  - Map
-  - See [\_](#pipelinesnametriggercron).
-
-- - `manual`
-  - Map
-  -
-
-:::
-
-
-### pipelines._name_.trigger.cron
-
-**`Type: Map`**
-
-
-
-
-:::list-table
-
-- - Key
-  - Type
-  - Description
-
-- - `quartz_cron_schedule`
-  - String
-  -
-
-- - `timezone_id`
-  - String
-  -
-
-:::
-
-
-### pipelines._name_.trigger.manual
-
-**`Type: Map`**
-
-
-
-
 ## quality_monitors
 
 **`Type: Map`**
@@ -8800,6 +8945,44 @@ resources:
 
 :::
 
+
+## secret_scopes
+
+**`Type: Map`**
+
+
+
+```yaml
+secret_scopes:
+  <secret_scope-name>:
+    <secret_scope-field-name>: <secret_scope-field-value>
+```
+
+
+:::list-table
+
+- - Key
+  - Type
+  - Description
+
+- - `backend_type`
+  - String
+  -
+
+- - `initial_manage_principal`
+  - String
+  -
+
+- - `keyvault_metadata`
+  - Map
+  - See [\_](#secret_scopesnamekeyvault_metadata).
+
+- - `name`
+  - String
+  -
+
+:::
+
+
 ## volumes
 
 **`Type: Map`**
@@ -8849,7 +9032,7 @@ volumes:
 
 - - `volume_type`
   - String
-  -
+  - The type of the volume. An external volume is located in the specified external location. A managed volume is located in the default location which is specified by the parent schema, or the parent catalog, or the Metastore. [Learn more](https://docs.databricks.com/aws/en/volumes/managed-vs-external)
 
 :::
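An aside on the new schema (illustrative only; the `AZURE_KEYVAULT` enum value is taken from the SDK's `workspace.ScopeBackendType`, and the vault identifiers below are placeholders): the fields above also admit an Azure Key Vault-backed scope, sketched as:

    secret_scopes:
      kv_scope:
        name: kv-backed-scope
        backend_type: AZURE_KEYVAULT
        keyvault_metadata:
          resource_id: /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/example-rg/providers/Microsoft.KeyVault/vaults/example-vault
          dns_name: https://example-vault.vault.azure.net/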
diff --git a/bundle/internal/schema/annotations.yml b/bundle/internal/schema/annotations.yml
index bc445055d5..4142801269 100644
--- a/bundle/internal/schema/annotations.yml
+++ b/bundle/internal/schema/annotations.yml
@@ -199,6 +199,9 @@ github.com/databricks/cli/bundle/config.Resources:
       The schema definitions for the bundle, where each key is the name of the schema.
     "markdown_description": |-
       The schema definitions for the bundle, where each key is the name of the schema. See [\_](/dev-tools/bundles/resources.md#schemas).
+  "secret_scopes":
+    "description": |-
+      PLACEHOLDER
   "volumes":
     "description": |-
       The volume definitions for the bundle, where each key is the name of the volume.
@@ -532,6 +535,19 @@ github.com/databricks/cli/bundle/config/resources.PipelinePermission:
   "user_name":
     "description": |-
       PLACEHOLDER
+github.com/databricks/cli/bundle/config/resources.SecretScope:
+  "backend_type":
+    "description": |-
+      PLACEHOLDER
+  "initial_manage_principal":
+    "description": |-
+      PLACEHOLDER
+  "keyvault_metadata":
+    "description": |-
+      PLACEHOLDER
+  "name":
+    "description": |-
+      PLACEHOLDER
 github.com/databricks/cli/bundle/config/variable.Lookup:
   "alert":
     "description": |-
diff --git a/bundle/schema/jsonschema.json b/bundle/schema/jsonschema.json
index e764c5bcff..d2a88b1158 100644
--- a/bundle/schema/jsonschema.json
+++ b/bundle/schema/jsonschema.json
@@ -1305,6 +1305,36 @@
         }
       ]
     },
+    "resources.SecretScope": {
+      "oneOf": [
+        {
+          "type": "object",
+          "properties": {
+            "backend_type": {
+              "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/workspace.ScopeBackendType"
+            },
+            "initial_manage_principal": {
+              "$ref": "#/$defs/string"
+            },
+            "keyvault_metadata": {
+              "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/workspace.AzureKeyVaultSecretScopeMetadata"
+            },
+            "name": {
+              "$ref": "#/$defs/string"
+            }
+          },
+          "additionalProperties": false,
+          "required": [
+            "name",
+            "initial_manage_principal"
+          ]
+        },
+        {
+          "type": "string",
+          "pattern": "\\$\\{(var(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)+)\\}"
+        }
+      ]
+    },
     "resources.Volume": {
       "oneOf": [
         {
@@ -1844,6 +1874,9 @@
           "$ref": "#/$defs/map/github.com/databricks/cli/bundle/config/resources.Schema",
           "markdownDescription": "The schema definitions for the bundle, where each key is the name of the schema. See [schemas](https://docs.databricks.com/dev-tools/bundles/resources.html#schemas)."
         },
+        "secret_scopes": {
+          "$ref": "#/$defs/map/github.com/databricks/cli/bundle/config/resources.SecretScope"
+        },
         "volumes": {
           "description": "The volume definitions for the bundle, where each key is the name of the volume.",
           "$ref": "#/$defs/map/github.com/databricks/cli/bundle/config/resources.Volume",
@@ -7368,6 +7401,33 @@
           "pattern": "\\$\\{(var(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)+)\\}"
         }
       ]
+    },
+    "workspace.AzureKeyVaultSecretScopeMetadata": {
+      "oneOf": [
+        {
+          "type": "object",
+          "properties": {
+            "dns_name": {
+              "$ref": "#/$defs/string"
+            },
+            "resource_id": {
+              "$ref": "#/$defs/string"
+            }
+          },
+          "additionalProperties": false,
+          "required": [
+            "dns_name",
+            "resource_id"
+          ]
+        },
+        {
+          "type": "string",
+          "pattern": "\\$\\{(var(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)+)\\}"
+        }
+      ]
+    },
+    "workspace.ScopeBackendType": {
+      "type": "string"
     }
   }
 }
@@ -7588,6 +7648,20 @@
         }
       ]
     },
+    "resources.SecretScope": {
+      "oneOf": [
+        {
+          "type": "object",
+          "additionalProperties": {
+            "$ref": "#/$defs/github.com/databricks/cli/bundle/config/resources.SecretScope"
+          }
+        },
+        {
+          "type": "string",
+          "pattern": "\\$\\{(var(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)+)\\}"
+        }
+      ]
+    },
     "resources.Volume": {
       "oneOf": [
         {