From 2ecdb60685bcfac1d72a839f97cc530f94494bf5 Mon Sep 17 00:00:00 2001 From: Oriol Date: Tue, 23 Sep 2025 17:42:27 +0200 Subject: [PATCH 1/8] feat: Adds support for Service Account credentials as provider inputs, environment variables and AWS Secrets Manager (#3700) * implement service account as credentials * env variables support * aws secret support * changed condition of warning of missing credentials * changelog * change factory * Revert "change factory" This reverts commit 40e19ae5f7628b1c4811c512c6611d7b4f3ba9a0. * refactor * don't fail when no valid auth method * try: fix data race failure * Revert "try: fix data race failure" This reverts commit 9c593554b2274e1ac3bb2f153994985b8cea7530. * prevent data races in HTTP client mocking for OAuth2 authentication * skip reset * add test * run test in CI * fix * matrix remove * Revert "matrix remove" This reverts commit 9d901bfb5c8e1a29344d8ef6235f6593306c24ef. * Revert "fix" This reverts commit d2fa94b429764dadc502810cf937b460f2276871. * execute only relevant test * remove increased timeout * Apply suggestions from code review Co-authored-by: Leo Antoli <430982+lantoli@users.noreply.github.com> * use enum instead of string * pr suggestions * pr comments doc * remove matrix * use credential provider in auth helper methods * error instead of warning when no credetials are set * typo * set env vars * unit test only --------- Co-authored-by: Leo Antoli <430982+lantoli@users.noreply.github.com> --- .changelog/3700.txt | 3 + .github/workflows/acceptance-tests-runner.yml | 25 +++- .github/workflows/acceptance-tests.yml | 2 + internal/config/client.go | 109 ++++++++++++++++-- internal/provider/credentials.go | 16 +-- internal/provider/provider.go | 48 ++++++-- .../provider/provider_authentication_test.go | 25 ++++ internal/provider/provider_sdk2.go | 55 +++++++-- .../service/advancedclustertpf/main_test.go | 12 ++ internal/testutil/acc/pre_check.go | 8 ++ internal/testutil/unit/http_mocker.go | 5 + internal/testutil/unit/provider_mock.go | 45 +++++++- 12 files changed, 311 insertions(+), 42 deletions(-) create mode 100644 .changelog/3700.txt diff --git a/.changelog/3700.txt b/.changelog/3700.txt new file mode 100644 index 0000000000..58ab43ce66 --- /dev/null +++ b/.changelog/3700.txt @@ -0,0 +1,3 @@ +```release-note:enhancement +provider: Supports Service Account as credentials to authenticate the provider +``` diff --git a/.github/workflows/acceptance-tests-runner.yml b/.github/workflows/acceptance-tests-runner.yml index 6847c90140..f555181663 100644 --- a/.github/workflows/acceptance-tests-runner.yml +++ b/.github/workflows/acceptance-tests-runner.yml @@ -171,6 +171,10 @@ on: required: true mongodb_atlas_rp_public_key: required: true + mongodb_atlas_client_id: + required: true + mongodb_atlas_client_secret: + required: true azure_directory_id: required: true azure_resource_group_name: @@ -245,7 +249,7 @@ jobs: mustTrigger: ${{ github.event_name == 'schedule' || (github.event_name == 'workflow_dispatch' && inputs.test_group == '' ) }} outputs: # ensure resources are sorted alphabetically advanced_cluster: ${{ steps.filter.outputs.advanced_cluster == 'true' || env.mustTrigger == 'true' }} - assume_role: ${{ steps.filter.outputs.assume_role == 'true' || env.mustTrigger == 'true' }} + authentication: ${{ steps.filter.outputs.authentication == 'true' || env.mustTrigger == 'true' }} autogen: ${{ steps.filter.outputs.autogen == 'true' || env.mustTrigger == 'true' }} backup: ${{ steps.filter.outputs.backup == 'true' || env.mustTrigger == 'true' }} 
control_plane_ip_addresses: ${{ steps.filter.outputs.control_plane_ip_addresses == 'true' || env.mustTrigger == 'true' }} @@ -278,7 +282,7 @@ jobs: filters: | advanced_cluster: - 'internal/service/advancedclustertpf/*.go' - assume_role: + authentication: - 'internal/provider/*.go' autogen: - 'internal/common/autogen/*.go' @@ -460,9 +464,9 @@ jobs: ./internal/service/advancedclustertpf run: make testacc - assume_role: + authentication: needs: [ change-detection, get-provider-version ] - if: ${{ needs.change-detection.outputs.assume_role == 'true' || inputs.test_group == 'assume_role' }} + if: ${{ needs.change-detection.outputs.authentication == 'true' || inputs.test_group == 'authentication' }} runs-on: ubuntu-latest permissions: {} steps: @@ -485,7 +489,7 @@ jobs: AWS_ACCESS_KEY_ID: ${{ secrets.aws_access_key_id }} ASSUME_ROLE_ARN: ${{ vars.ASSUME_ROLE_ARN }} run: bash ./scripts/generate-credentials-with-sts-assume-role.sh - - name: Acceptance Tests + - name: Acceptance Tests (STS Assume Role) env: MONGODB_ATLAS_PUBLIC_KEY: "" MONGODB_ATLAS_PRIVATE_KEY: "" @@ -497,6 +501,17 @@ jobs: AWS_SECRET_ACCESS_KEY: ${{ steps.sts-assume-role.outputs.aws_secret_access_key }} AWS_SESSION_TOKEN: ${{ steps.sts-assume-role.outputs.AWS_SESSION_TOKEN }} MONGODB_ATLAS_LAST_VERSION: ${{ needs.get-provider-version.outputs.provider_version }} + ACCTEST_REGEX_RUN: '^TestAccSTSAssumeRole' + ACCTEST_PACKAGES: ./internal/provider + run: make testacc + - name: Acceptance Tests (Service Account) + env: + MONGODB_ATLAS_PUBLIC_KEY: "" + MONGODB_ATLAS_PRIVATE_KEY: "" + MONGODB_ATLAS_CLIENT_ID: ${{ secrets.mongodb_atlas_client_id }} + MONGODB_ATLAS_CLIENT_SECRET: ${{ secrets.mongodb_atlas_client_secret }} + MONGODB_ATLAS_LAST_VERSION: ${{ needs.get-provider-version.outputs.provider_version }} + ACCTEST_REGEX_RUN: '^TestAccServiceAccount' ACCTEST_PACKAGES: ./internal/provider run: make testacc diff --git a/.github/workflows/acceptance-tests.yml b/.github/workflows/acceptance-tests.yml index 092593d223..e30844f365 100644 --- a/.github/workflows/acceptance-tests.yml +++ b/.github/workflows/acceptance-tests.yml @@ -65,6 +65,8 @@ jobs: mongodb_atlas_gov_private_key: ${{ inputs.atlas_cloud_env == 'qa' && secrets.MONGODB_ATLAS_GOV_PRIVATE_KEY_QA || secrets.MONGODB_ATLAS_GOV_PRIVATE_KEY_DEV }} mongodb_atlas_rp_public_key: ${{ inputs.atlas_cloud_env == 'qa' && secrets.MONGODB_ATLAS_RP_PUBLIC_KEY_QA || secrets.MONGODB_ATLAS_RP_PUBLIC_KEY_DEV }} mongodb_atlas_rp_private_key: ${{ inputs.atlas_cloud_env == 'qa' && secrets.MONGODB_ATLAS_RP_PRIVATE_KEY_QA || secrets.MONGODB_ATLAS_RP_PRIVATE_KEY_DEV }} + mongodb_atlas_client_id: ${{ inputs.atlas_cloud_env == 'qa' && secrets.MONGODB_ATLAS_CLIENT_ID_QA || secrets.MONGODB_ATLAS_CLIENT_ID_DEV }} + mongodb_atlas_client_secret: ${{ inputs.atlas_cloud_env == 'qa' && secrets.MONGODB_ATLAS_CLIENT_SECRET_QA || secrets.MONGODB_ATLAS_CLIENT_SECRET_DEV }} ca_cert: ${{ secrets.CA_CERT }} aws_account_id: ${{ secrets.AWS_ACCOUNT_ID }} aws_access_key_id: ${{ secrets.AWS_ACCESS_KEY_ID }} diff --git a/internal/config/client.go b/internal/config/client.go index 7004fb63a3..00497c80ae 100644 --- a/internal/config/client.go +++ b/internal/config/client.go @@ -25,6 +25,10 @@ import ( "github.com/spf13/cast" "github.com/mongodb/terraform-provider-mongodbatlas/version" + + "go.mongodb.org/atlas-sdk/v20250312007/auth/clientcredentials" + + "go.mongodb.org/atlas-sdk/v20250312007/auth" ) const ( @@ -40,6 +44,37 @@ const ( expectContinueTimeout = 1 * time.Second ) +type AuthMethod int + +const ( + ServiceAccount 
AuthMethod = iota + Digest + Unknown +) + +// CredentialProvider interface for types that can provide MongoDB Atlas credentials +type CredentialProvider interface { + GetPublicKey() string + GetPrivateKey() string + GetClientID() string + GetClientSecret() string +} + +// IsDigestAuth checks if public/private key credentials are present +func IsDigestAuth(cp CredentialProvider) bool { + return cp.GetPublicKey() != "" && cp.GetPrivateKey() != "" +} + +// IsServiceAccountAuth checks if client ID/secret credentials are present +func IsServiceAccountAuth(cp CredentialProvider) bool { + return cp.GetClientID() != "" && cp.GetClientSecret() != "" +} + +// HasValidAuthCredentials checks if any valid authentication method is provided +func HasValidAuthCredentials(cp CredentialProvider) bool { + return IsDigestAuth(cp) || IsServiceAccountAuth(cp) +} + var baseTransport = &http.Transport{ DialContext: (&net.Dialer{ Timeout: timeout, @@ -71,9 +106,17 @@ type Config struct { BaseURL string RealmBaseURL string TerraformVersion string + ClientID string + ClientSecret string PreviewV2AdvancedClusterEnabled bool } +// CredentialProvider implementation for Config +func (c *Config) GetPublicKey() string { return c.PublicKey } +func (c *Config) GetPrivateKey() string { return c.PrivateKey } +func (c *Config) GetClientID() string { return c.ClientID } +func (c *Config) GetClientSecret() string { return c.ClientSecret } + type AssumeRole struct { Tags map[string]string RoleARN string @@ -97,15 +140,56 @@ type UAMetadata struct { } func (c *Config) NewClient(ctx context.Context) (any, error) { - // Network Logging transport is before Digest transport so it can log the first Digest requests with 401 Unauthorized. - // Terraform logging transport is after Digest transport so the Unauthorized request bodies are not logged. + // Network Logging transport is before authentication transport so it can log authentication requests networkLoggingTransport := NewTransportWithNetworkLogging(baseTransport, logging.IsDebugOrHigher()) - digestTransport := digest.NewTransportWithHTTPRoundTripper(cast.ToString(c.PublicKey), cast.ToString(c.PrivateKey), networkLoggingTransport) - // Don't change logging.NewTransport to NewSubsystemLoggingHTTPTransport until all resources are in TPF. - tfLoggingTransport := logging.NewTransport("Atlas", digestTransport) - client := &http.Client{Transport: tfLoggingTransport} - optsAtlas := []matlasClient.ClientOpt{matlasClient.SetUserAgent(userAgent(c))} + var client *http.Client + var optsAtlas []matlasClient.ClientOpt + + // Determine authentication method based on available credentials + switch ResolveAuthMethod(c) { + case ServiceAccount: + conf := clientcredentials.NewConfig(c.ClientID, c.ClientSecret) + // Override TokenURL and RevokeURL if custom BaseURL is provided + if c.BaseURL != "" { + baseURL := strings.TrimRight(c.BaseURL, "/") + conf.TokenURL = baseURL + clientcredentials.TokenAPIPath + conf.RevokeURL = baseURL + clientcredentials.RevokeAPIPath + } + + // Create a base HTTP client for token acquisition + baseHTTPClient := &http.Client{ + Transport: networkLoggingTransport, + } + + // Set the HTTP client in context for token acquisition + ctx = context.WithValue(ctx, auth.HTTPClient, baseHTTPClient) + + tokenSource := conf.TokenSource(ctx) + + // Acquire an initial token upfront for several reasons: + // 1. OAuth2 token caching: The oauth2 library only caches tokens after successful acquisition + // 2. 
Early credential validation: Fail fast during provider init rather than first resource operation + // 3. Performance: Subsequent requests use cached tokens instead of blocking for token acquisition + _, err := tokenSource.Token() + if err != nil { + return nil, fmt.Errorf("failed to acquire OAuth2 token: %w", err) + } + + oauthClient := auth.NewClient(ctx, tokenSource) + tfLoggingTransport := logging.NewTransport("Atlas", oauthClient.Transport) + oauthClient.Transport = tfLoggingTransport + client = oauthClient + optsAtlas = []matlasClient.ClientOpt{matlasClient.SetUserAgent(userAgent(c))} + case Digest: + digestTransport := digest.NewTransportWithHTTPRoundTripper(cast.ToString(c.PublicKey), cast.ToString(c.PrivateKey), networkLoggingTransport) + // Don't change logging.NewTransport to NewSubsystemLoggingHTTPTransport until all resources are in TPF. + tfLoggingTransport := logging.NewTransport("Atlas", digestTransport) + client = &http.Client{Transport: tfLoggingTransport} + optsAtlas = []matlasClient.ClientOpt{matlasClient.SetUserAgent(userAgent(c))} + case Unknown: + } + if c.BaseURL != "" { optsAtlas = append(optsAtlas, matlasClient.SetBaseURL(c.BaseURL)) } @@ -319,3 +403,14 @@ func userAgent(c *Config) string { return strings.Join(parts, " ") } + +// ResolveAuthMethod determines the authentication method from any credential provider +func ResolveAuthMethod(cg CredentialProvider) AuthMethod { + if IsServiceAccountAuth(cg) { + return ServiceAccount + } + if IsDigestAuth(cg) { + return Digest + } + return Unknown +} diff --git a/internal/provider/credentials.go b/internal/provider/credentials.go index a0482aa8cc..e945c1a9b6 100644 --- a/internal/provider/credentials.go +++ b/internal/provider/credentials.go @@ -74,16 +74,18 @@ func configureCredentialsSTS(cfg *config.Config, secret, region, awsAccessKeyID, if err != nil { return *cfg, err } - if secretData.PrivateKey == "" { - return *cfg, fmt.Errorf("secret missing value for credential PrivateKey") - } - if secretData.PublicKey == "" { - return *cfg, fmt.Errorf("secret missing value for credential PublicKey") + switch config.ResolveAuthMethod(&secretData) { + case config.Digest: + cfg.PublicKey = secretData.PublicKey + cfg.PrivateKey = secretData.PrivateKey + case config.ServiceAccount: + cfg.ClientID = secretData.ClientID + cfg.ClientSecret = secretData.ClientSecret + case config.Unknown: + return *cfg, fmt.Errorf("secret missing value for supported credentials: PrivateKey/PublicKey or ClientID/ClientSecret") } - cfg.PublicKey = secretData.PublicKey - cfg.PrivateKey = secretData.PrivateKey return *cfg, nil } diff --git a/internal/provider/provider.go b/internal/provider/provider.go index 300afddbaf..6580aa29ec 100644 --- a/internal/provider/provider.go +++ b/internal/provider/provider.go @@ -61,7 +61,7 @@ const ( MongodbGovCloudQAURL = "https://cloud-qa.mongodbgov.com" MongodbGovCloudDevURL = "https://cloud-dev.mongodbgov.com" ProviderConfigError = "error in configuring the provider." - MissingAuthAttrError = "either Atlas Programmatic API Keys or AWS Secrets Manager attributes must be set" + MissingAuthAttrError = "either AWS Secrets Manager, Service Accounts or Atlas Programmatic API Keys attributes must be set" ProviderMetaUserAgentExtra = "user_agent_extra" ProviderMetaUserAgentExtraDesc = "You can extend the user agent header for each request made by the provider to the Atlas Admin API. The Key Values will be formatted as {key}/{value}." 
ProviderMetaModuleName = "module_name" @@ -75,16 +75,18 @@ type MongodbtlasProvider struct { type tfMongodbAtlasProviderModel struct { AssumeRole types.List `tfsdk:"assume_role"` - PublicKey types.String `tfsdk:"public_key"` + Region types.String `tfsdk:"region"` PrivateKey types.String `tfsdk:"private_key"` BaseURL types.String `tfsdk:"base_url"` RealmBaseURL types.String `tfsdk:"realm_base_url"` SecretName types.String `tfsdk:"secret_name"` - Region types.String `tfsdk:"region"` + PublicKey types.String `tfsdk:"public_key"` StsEndpoint types.String `tfsdk:"sts_endpoint"` AwsAccessKeyID types.String `tfsdk:"aws_access_key_id"` AwsSecretAccessKeyID types.String `tfsdk:"aws_secret_access_key"` AwsSessionToken types.String `tfsdk:"aws_session_token"` + ClientID types.String `tfsdk:"client_id"` + ClientSecret types.String `tfsdk:"client_secret"` IsMongodbGovCloud types.Bool `tfsdk:"is_mongodbgov_cloud"` } @@ -188,6 +190,14 @@ func (p *MongodbtlasProvider) Schema(ctx context.Context, req provider.SchemaReq Optional: true, Description: "AWS Security Token Service provided session token.", }, + "client_id": schema.StringAttribute{ + Optional: true, + Description: "MongoDB Atlas Client ID for Service Account.", + }, + "client_secret": schema.StringAttribute{ + Optional: true, + Description: "MongoDB Atlas Client Secret for Service Account.", + }, }, } } @@ -276,6 +286,8 @@ func (p *MongodbtlasProvider) Configure(ctx context.Context, req provider.Config BaseURL: data.BaseURL.ValueString(), RealmBaseURL: data.RealmBaseURL.ValueString(), TerraformVersion: req.TerraformVersion, + ClientID: data.ClientID.ValueString(), + ClientSecret: data.ClientSecret.ValueString(), } var assumeRoles []tfAssumeRoleModel @@ -386,9 +398,6 @@ func setDefaultValuesWithValidations(ctx context.Context, data *tfMongodbAtlasPr "MONGODB_ATLAS_PUBLIC_KEY", "MCLI_PUBLIC_API_KEY", }, "").(string)) - if data.PublicKey.ValueString() == "" && !awsRoleDefined { - resp.Diagnostics.AddWarning(ProviderConfigError, MissingAuthAttrError) - } } if data.PrivateKey.ValueString() == "" { @@ -397,9 +406,6 @@ func setDefaultValuesWithValidations(ctx context.Context, data *tfMongodbAtlasPr "MONGODB_ATLAS_PRIVATE_KEY", "MCLI_PRIVATE_API_KEY", }, "").(string)) - if data.PrivateKey.ValueString() == "" && !awsRoleDefined { - resp.Diagnostics.AddWarning(ProviderConfigError, MissingAuthAttrError) - } } if data.RealmBaseURL.ValueString() == "" { @@ -450,6 +456,30 @@ func setDefaultValuesWithValidations(ctx context.Context, data *tfMongodbAtlasPr }, "").(string)) } + if data.ClientID.ValueString() == "" { + data.ClientID = types.StringValue(MultiEnvDefaultFunc([]string{ + "MONGODB_ATLAS_CLIENT_ID", + "TF_VAR_CLIENT_ID", + }, "").(string)) + } + + if data.ClientSecret.ValueString() == "" { + data.ClientSecret = types.StringValue(MultiEnvDefaultFunc([]string{ + "MONGODB_ATLAS_CLIENT_SECRET", + "TF_VAR_CLIENT_SECRET", + }, "").(string)) + } + + // Check if any valid authentication method is provided + if !config.HasValidAuthCredentials(&config.Config{ + PublicKey: data.PublicKey.ValueString(), + PrivateKey: data.PrivateKey.ValueString(), + ClientID: data.ClientID.ValueString(), + ClientSecret: data.ClientSecret.ValueString(), + }) && !awsRoleDefined { + resp.Diagnostics.AddError(ProviderConfigError, MissingAuthAttrError) + } + return *data } diff --git a/internal/provider/provider_authentication_test.go b/internal/provider/provider_authentication_test.go index c91510427b..f52b115139 100644 --- a/internal/provider/provider_authentication_test.go +++ 
b/internal/provider/provider_authentication_test.go @@ -40,6 +40,24 @@ func TestAccSTSAssumeRole_basic(t *testing.T) { }) } +func TestAccServiceAccount_basic(t *testing.T) { + var ( + resourceName = "data.mongodbatlas_projects.test" + ) + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acc.PreCheckServiceAccount(t); acc.PreCheckRegularCredsAreEmpty(t) }, + ProtoV6ProviderFactories: acc.TestAccProviderV6Factories, + Steps: []resource.TestStep{ + { + Config: configDataSourceProject(), + Check: resource.ComposeAggregateTestCheckFunc( + resource.TestCheckResourceAttrSet(resourceName, "results.#"), + ), + }, + }, + }) +} + func configProject(orgID, projectName string) string { return fmt.Sprintf(` resource "mongodbatlas_project" "test" { @@ -48,3 +66,10 @@ func configProject(orgID, projectName string) string { } `, orgID, projectName) } + +func configDataSourceProject() string { + return ` + data "mongodbatlas_projects" "test" { + } + ` +} diff --git a/internal/provider/provider_sdk2.go b/internal/provider/provider_sdk2.go index cc904105bc..337c591c1a 100644 --- a/internal/provider/provider_sdk2.go +++ b/internal/provider/provider_sdk2.go @@ -57,10 +57,18 @@ import ( ) type SecretData struct { - PublicKey string `json:"public_key"` - PrivateKey string `json:"private_key"` + PublicKey string `json:"public_key"` + PrivateKey string `json:"private_key"` + ClientID string `json:"client_id"` + ClientSecret string `json:"client_secret"` } +// CredentialProvider implementation for SecretData +func (s *SecretData) GetPublicKey() string { return s.PublicKey } +func (s *SecretData) GetPrivateKey() string { return s.PrivateKey } +func (s *SecretData) GetClientID() string { return s.ClientID } +func (s *SecretData) GetClientSecret() string { return s.ClientSecret } + // NewSdkV2Provider returns the provider to be use by the code. func NewSdkV2Provider() *schema.Provider { provider := &schema.Provider{ @@ -122,6 +130,16 @@ func NewSdkV2Provider() *schema.Provider { Optional: true, Description: "AWS Security Token Service provided session token.", }, + "client_id": { + Type: schema.TypeString, + Optional: true, + Description: "MongoDB Atlas Client ID for Service Account.", + }, + "client_secret": { + Type: schema.TypeString, + Optional: true, + Description: "MongoDB Atlas Client Secret for Service Account.", + }, }, DataSourcesMap: getDataSourcesMap(), ResourcesMap: getResourcesMap(), @@ -283,6 +301,8 @@ func providerConfigure(provider *schema.Provider) func(ctx context.Context, d *s BaseURL: d.Get("base_url").(string), RealmBaseURL: d.Get("realm_base_url").(string), TerraformVersion: provider.TerraformVersion, + ClientID: d.Get("client_id").(string), + ClientSecret: d.Get("client_secret").(string), } assumeRoleValue, ok := d.GetOk("assume_role") @@ -353,9 +373,6 @@ func setDefaultsAndValidations(d *schema.ResourceData) diag.Diagnostics { }); err != nil { return append(diagnostics, diag.FromErr(err)...) } - if d.Get("public_key").(string) == "" && !awsRoleDefined { - diagnostics = append(diagnostics, diag.Diagnostic{Severity: diag.Warning, Summary: MissingAuthAttrError}) - } if err := setValueFromConfigOrEnv(d, "private_key", []string{ "MONGODB_ATLAS_PRIVATE_API_KEY", @@ -365,10 +382,6 @@ func setDefaultsAndValidations(d *schema.ResourceData) diag.Diagnostics { return append(diagnostics, diag.FromErr(err)...) 
} - if d.Get("private_key").(string) == "" && !awsRoleDefined { - diagnostics = append(diagnostics, diag.Diagnostic{Severity: diag.Warning, Summary: MissingAuthAttrError}) - } - if err := setValueFromConfigOrEnv(d, "realm_base_url", []string{ "MONGODB_REALM_BASE_URL", }); err != nil { @@ -417,6 +430,30 @@ func setDefaultsAndValidations(d *schema.ResourceData) diag.Diagnostics { return append(diagnostics, diag.FromErr(err)...) } + if err := setValueFromConfigOrEnv(d, "client_id", []string{ + "MONGODB_ATLAS_CLIENT_ID", + "TF_VAR_CLIENT_ID", + }); err != nil { + return append(diagnostics, diag.FromErr(err)...) + } + + if err := setValueFromConfigOrEnv(d, "client_secret", []string{ + "MONGODB_ATLAS_CLIENT_SECRET", + "TF_VAR_CLIENT_SECRET", + }); err != nil { + return append(diagnostics, diag.FromErr(err)...) + } + + // Check if any valid authentication method is provided + if !config.HasValidAuthCredentials(&config.Config{ + PublicKey: d.Get("public_key").(string), + PrivateKey: d.Get("private_key").(string), + ClientID: d.Get("client_id").(string), + ClientSecret: d.Get("client_secret").(string), + }) && !awsRoleDefined { + diagnostics = append(diagnostics, diag.Diagnostic{Severity: diag.Error, Summary: MissingAuthAttrError}) + } + return diagnostics } diff --git a/internal/service/advancedclustertpf/main_test.go b/internal/service/advancedclustertpf/main_test.go index 4f1d4d3054..690c892814 100644 --- a/internal/service/advancedclustertpf/main_test.go +++ b/internal/service/advancedclustertpf/main_test.go @@ -8,6 +8,18 @@ import ( ) func TestMain(m *testing.M) { + // Only modify credentials for unit tests. Preserve GH Actions env in acceptance (TF_ACC=1). + if os.Getenv("TF_ACC") == "" { + // If no credentials are provided, force digest auth to satisfy provider validation. 
+ _ = os.Setenv("MONGODB_ATLAS_PUBLIC_API_KEY", "dummy") + _ = os.Setenv("MONGODB_ATLAS_PRIVATE_API_KEY", "dummy") + // Ensure Service Account auth is not selected if we just set digest + _ = os.Unsetenv("MONGODB_ATLAS_CLIENT_ID") + _ = os.Unsetenv("MONGODB_ATLAS_CLIENT_SECRET") + _ = os.Unsetenv("TF_VAR_CLIENT_ID") + _ = os.Unsetenv("TF_VAR_CLIENT_SECRET") + } + cleanup := acc.SetupSharedResources() exitCode := m.Run() cleanup() diff --git a/internal/testutil/acc/pre_check.go b/internal/testutil/acc/pre_check.go index e0de64463b..41b830c66c 100644 --- a/internal/testutil/acc/pre_check.go +++ b/internal/testutil/acc/pre_check.go @@ -377,3 +377,11 @@ func PreCheckAwsMsk(tb testing.TB) { tb.Fatal("`AWS_MSK_ARN` must be set for AWS MSK acceptance testing") } } + +func PreCheckServiceAccount(tb testing.TB) { + tb.Helper() + if os.Getenv("MONGODB_ATLAS_CLIENT_ID") == "" || + os.Getenv("MONGODB_ATLAS_CLIENT_SECRET") == "" { + tb.Fatal("`MONGODB_ATLAS_CLIENT_ID`, `MONGODB_ATLAS_CLIENT_SECRET` must be set for Service Account acceptance testing") + } +} diff --git a/internal/testutil/unit/http_mocker.go b/internal/testutil/unit/http_mocker.go index 3307ba2267..44ca54d5a7 100644 --- a/internal/testutil/unit/http_mocker.go +++ b/internal/testutil/unit/http_mocker.go @@ -80,6 +80,7 @@ type mockClientModifier struct { config *MockHTTPDataConfig mockRoundTripper http.RoundTripper oldRoundTripper http.RoundTripper + skipReset bool // When true, skip reset to avoid data races with shared clients } func (c *mockClientModifier) ModifyHTTPClient(httpClient *http.Client) error { @@ -89,6 +90,10 @@ func (c *mockClientModifier) ModifyHTTPClient(httpClient *http.Client) error { } func (c *mockClientModifier) ResetHTTPClient(httpClient *http.Client) { + // Skip reset when using copied HTTP clients to avoid data races + if c.skipReset { + return + } if c.oldRoundTripper != nil { httpClient.Transport = c.oldRoundTripper } diff --git a/internal/testutil/unit/provider_mock.go b/internal/testutil/unit/provider_mock.go index 5be1eaa890..8fab9b6856 100644 --- a/internal/testutil/unit/provider_mock.go +++ b/internal/testutil/unit/provider_mock.go @@ -46,16 +46,32 @@ func (p *ProviderMocked) Configure(ctx context.Context, req fwProvider.Configure if !ok { p.t.Fatal("Failed to cast ResourceData to MongoDBClient") } - httpClient := client.AtlasV2.GetConfig().HTTPClient - if httpClient == nil { + + // Create a copy of the HTTP client to avoid data races with OAuth2 background operations + originalClient := client.AtlasV2.GetConfig().HTTPClient + if originalClient == nil { p.t.Fatal("HTTPClient is nil, mocking will fail") } + + // Create a new HTTP client to avoid modifying the live one + mockedClient := &http.Client{ + Transport: originalClient.Transport, + Timeout: originalClient.Timeout, + } + if p.ClientModifier != nil { - err := p.ClientModifier.ModifyHTTPClient(httpClient) + // Since we're using a copied client, set skipReset to avoid data races + if mockModifier, ok := p.ClientModifier.(*mockClientModifier); ok { + mockModifier.skipReset = true + } + err := p.ClientModifier.ModifyHTTPClient(mockedClient) if err != nil { p.t.Fatal(err) } } + + // Replace the HTTP client in the Atlas configuration + client.AtlasV2.GetConfig().HTTPClient = mockedClient } func (p *ProviderMocked) DataSources(ctx context.Context) []func() datasource.DataSource { @@ -76,11 +92,30 @@ func muxProviderFactory(t *testing.T, clientModifier HTTPClientModifier) func() if !ok { t.Fatalf("Failed to cast response to MongoDBClient, Got type %T", resp) 
} - httpClient := client.AtlasV2.GetConfig().HTTPClient - err := clientModifier.ModifyHTTPClient(httpClient) + + // Create a copy of the HTTP client to avoid data races with OAuth2 background operations + originalClient := client.AtlasV2.GetConfig().HTTPClient + if originalClient == nil { + t.Fatalf("HTTPClient is nil, mocking will fail") + } + + // Create a new HTTP client to avoid modifying the live one + mockedClient := &http.Client{ + Transport: originalClient.Transport, + Timeout: originalClient.Timeout, + } + + // Since we're using a copied client, set skipReset to avoid data races + if mockModifier, ok := clientModifier.(*mockClientModifier); ok { + mockModifier.skipReset = true + } + err := clientModifier.ModifyHTTPClient(mockedClient) if err != nil { t.Fatalf("Failed to modify HTTPClient: %s", err) } + + // Replace the HTTP client in the Atlas configuration + client.AtlasV2.GetConfig().HTTPClient = mockedClient return resp, diags } fwProviderInstance := provider.NewFrameworkProvider() From e42e6c1bdbdb3bb5968a5f6539013d1e5553642a Mon Sep 17 00:00:00 2001 From: Leo Antoli <430982+lantoli@users.noreply.github.com> Date: Fri, 26 Sep 2025 15:59:26 +0200 Subject: [PATCH 2/8] chore: Implement Service Account token caching & thread-safe concurrent access (#3711) --- .github/workflows/acceptance-tests-runner.yml | 24 +++++-- internal/config/client.go | 70 +++++-------------- internal/config/service_account.go | 51 ++++++++++++++ internal/service/advancedcluster/main_test.go | 12 ---- .../resource_organization_test.go | 8 +-- internal/testutil/acc/factory.go | 8 +++ internal/testutil/acc/pre_check.go | 6 +- internal/testutil/acc/skip.go | 5 +- 8 files changed, 101 insertions(+), 83 deletions(-) create mode 100644 internal/config/service_account.go diff --git a/.github/workflows/acceptance-tests-runner.yml b/.github/workflows/acceptance-tests-runner.yml index 13eb304a8c..23855cd7fd 100644 --- a/.github/workflows/acceptance-tests-runner.yml +++ b/.github/workflows/acceptance-tests-runner.yml @@ -283,6 +283,7 @@ jobs: advanced_cluster: - 'internal/service/advancedcluster/*.go' authentication: + - 'internal/config/*.go' - 'internal/provider/*.go' autogen: - 'internal/common/autogen/*.go' @@ -407,8 +408,7 @@ jobs: MONGODB_ATLAS_LAST_VERSION: ${{ needs.get-provider-version.outputs.provider_version }} HTTP_MOCKER_CAPTURE: 'true' ACCTEST_REGEX_RUN: ${{ inputs.reduced_tests && '^TestAccMockable' || env.ACCTEST_REGEX_RUN }} - ACCTEST_PACKAGES: | - ./internal/service/advancedcluster + ACCTEST_PACKAGES: ./internal/service/advancedcluster run: make testacc advanced_cluster_tpf_mig_from_sdkv2: @@ -433,8 +433,7 @@ jobs: MONGODB_ATLAS_LAST_1X_VERSION: ${{ inputs.mongodb_atlas_last_1x_version }} MONGODB_ATLAS_TEST_SDKV2_TO_TPF: 'true' ACCTEST_REGEX_RUN: '^TestV1xMig' - ACCTEST_PACKAGES: | - ./internal/service/advancedcluster + ACCTEST_PACKAGES: ./internal/service/advancedcluster run: make testacc advanced_cluster_tpf_mig_from_tpf_preview: @@ -460,8 +459,7 @@ jobs: MONGODB_ATLAS_LAST_1X_VERSION: ${{ inputs.mongodb_atlas_last_1x_version }} MONGODB_ATLAS_TEST_SDKV2_TO_TPF: 'false' ACCTEST_REGEX_RUN: '^TestV1xMig' - ACCTEST_PACKAGES: | - ./internal/service/advancedcluster + ACCTEST_PACKAGES: ./internal/service/advancedcluster run: make testacc authentication: @@ -514,7 +512,19 @@ jobs: ACCTEST_REGEX_RUN: '^TestAccServiceAccount' ACCTEST_PACKAGES: ./internal/provider run: make testacc - + - name: Acceptance Tests (Service Account smoke tests) # small selection of fast tests to run with SA + env: + 
MONGODB_ATLAS_PUBLIC_KEY: "" + MONGODB_ATLAS_PRIVATE_KEY: "" + MONGODB_ATLAS_CLIENT_ID: ${{ secrets.mongodb_atlas_client_id }} + MONGODB_ATLAS_CLIENT_SECRET: ${{ secrets.mongodb_atlas_client_secret }} + MONGODB_ATLAS_LAST_VERSION: ${{ needs.get-provider-version.outputs.provider_version }} + ACCTEST_REGEX_RUN: '^TestAcc' # don't run migration tests because previous provider versions don't support SA + ACCTEST_PACKAGES: | + ./internal/service/alertconfiguration + ./internal/service/databaseuser + ./internal/service/maintenancewindow + run: make testacc autogen: needs: [ change-detection, get-provider-version ] if: ${{ needs.change-detection.outputs.autogen == 'true' || inputs.test_group == 'autogen' }} diff --git a/internal/config/client.go b/internal/config/client.go index 00497c80ae..db611cde2a 100644 --- a/internal/config/client.go +++ b/internal/config/client.go @@ -7,7 +7,6 @@ import ( "net" "net/http" "net/url" - "strconv" "strings" "time" @@ -26,15 +25,12 @@ import ( "github.com/mongodb/terraform-provider-mongodbatlas/version" - "go.mongodb.org/atlas-sdk/v20250312007/auth/clientcredentials" - "go.mongodb.org/atlas-sdk/v20250312007/auth" ) const ( - toolName = "terraform-provider-mongodbatlas" - terraformPlatformName = "Terraform" - previewV2AdvancedClusterEnabledUAKey = "AdvancedClusterPreview" + toolName = "terraform-provider-mongodbatlas" + terraformPlatformName = "Terraform" timeout = 5 * time.Second keepAlive = 30 * time.Second @@ -100,15 +96,14 @@ type MongoDBClient struct { // Config contains the configurations needed to use SDKs type Config struct { - AssumeRole *AssumeRole - PublicKey string - PrivateKey string - BaseURL string - RealmBaseURL string - TerraformVersion string - ClientID string - ClientSecret string - PreviewV2AdvancedClusterEnabled bool + AssumeRole *AssumeRole + PublicKey string + PrivateKey string + BaseURL string + RealmBaseURL string + TerraformVersion string + ClientID string + ClientSecret string } // CredentialProvider implementation for Config @@ -144,87 +139,58 @@ func (c *Config) NewClient(ctx context.Context) (any, error) { networkLoggingTransport := NewTransportWithNetworkLogging(baseTransport, logging.IsDebugOrHigher()) var client *http.Client - var optsAtlas []matlasClient.ClientOpt // Determine authentication method based on available credentials switch ResolveAuthMethod(c) { case ServiceAccount: - conf := clientcredentials.NewConfig(c.ClientID, c.ClientSecret) - // Override TokenURL and RevokeURL if custom BaseURL is provided - if c.BaseURL != "" { - baseURL := strings.TrimRight(c.BaseURL, "/") - conf.TokenURL = baseURL + clientcredentials.TokenAPIPath - conf.RevokeURL = baseURL + clientcredentials.RevokeAPIPath - } - - // Create a base HTTP client for token acquisition - baseHTTPClient := &http.Client{ - Transport: networkLoggingTransport, - } - - // Set the HTTP client in context for token acquisition - ctx = context.WithValue(ctx, auth.HTTPClient, baseHTTPClient) - - tokenSource := conf.TokenSource(ctx) - - // Acquire an initial token upfront for several reasons: - // 1. OAuth2 token caching: The oauth2 library only caches tokens after successful acquisition - // 2. Early credential validation: Fail fast during provider init rather than first resource operation - // 3. 
Performance: Subsequent requests use cached tokens instead of blocking for token acquisition - _, err := tokenSource.Token() + tokenSource, err := tokenSource(ctx, c, networkLoggingTransport) if err != nil { - return nil, fmt.Errorf("failed to acquire OAuth2 token: %w", err) + return nil, err } - oauthClient := auth.NewClient(ctx, tokenSource) + // Don't change logging.NewTransport to NewSubsystemLoggingHTTPTransport until all resources are in TPF. tfLoggingTransport := logging.NewTransport("Atlas", oauthClient.Transport) oauthClient.Transport = tfLoggingTransport client = oauthClient - optsAtlas = []matlasClient.ClientOpt{matlasClient.SetUserAgent(userAgent(c))} case Digest: digestTransport := digest.NewTransportWithHTTPRoundTripper(cast.ToString(c.PublicKey), cast.ToString(c.PrivateKey), networkLoggingTransport) // Don't change logging.NewTransport to NewSubsystemLoggingHTTPTransport until all resources are in TPF. tfLoggingTransport := logging.NewTransport("Atlas", digestTransport) client = &http.Client{Transport: tfLoggingTransport} - optsAtlas = []matlasClient.ClientOpt{matlasClient.SetUserAgent(userAgent(c))} case Unknown: } + // Initialize the old SDK + optsAtlas := []matlasClient.ClientOpt{matlasClient.SetUserAgent(userAgent(c))} if c.BaseURL != "" { optsAtlas = append(optsAtlas, matlasClient.SetBaseURL(c.BaseURL)) } - - // Initialize the MongoDB Atlas API Client. atlasClient, err := matlasClient.New(client, optsAtlas...) if err != nil { return nil, err } + // Initialize the new SDK for different versions sdkV2Client, err := c.newSDKV2Client(client) if err != nil { return nil, err } - sdkPreviewClient, err := c.newSDKPreviewClient(client) if err != nil { return nil, err } - sdkV220240530Client, err := c.newSDKV220240530Client(client) if err != nil { return nil, err } - sdkV220240805Client, err := c.newSDKV220240805Client(client) if err != nil { return nil, err } - sdkV220241113Client, err := c.newSDKV220241113Client(client) if err != nil { return nil, err } - clients := &MongoDBClient{ Atlas: atlasClient, AtlasV2: sdkV2Client, @@ -387,14 +353,10 @@ func (c *MongoDBClient) UntypedAPICall(ctx context.Context, params *APICallParam } func userAgent(c *Config) string { - isPreviewV2AdvancedClusterEnabled := c.PreviewV2AdvancedClusterEnabled - metadata := []UAMetadata{ {toolName, version.ProviderVersion}, {terraformPlatformName, c.TerraformVersion}, - {previewV2AdvancedClusterEnabledUAKey, strconv.FormatBool(isPreviewV2AdvancedClusterEnabled)}, } - var parts []string for _, info := range metadata { part := fmt.Sprintf("%s/%s", info.Name, info.Value) diff --git a/internal/config/service_account.go b/internal/config/service_account.go new file mode 100644 index 0000000000..399020ec88 --- /dev/null +++ b/internal/config/service_account.go @@ -0,0 +1,51 @@ +package config + +import ( + "context" + "fmt" + "net/http" + "strings" + "sync" + + "github.com/mongodb/atlas-sdk-go/auth" + "github.com/mongodb/atlas-sdk-go/auth/clientcredentials" + "golang.org/x/oauth2" +) + +var saInfo = struct { + tokenSource auth.TokenSource + clientID string + clientSecret string + baseURL string + mu sync.Mutex +}{} + +func tokenSource(ctx context.Context, c *Config, base http.RoundTripper) (auth.TokenSource, error) { + saInfo.mu.Lock() + defer saInfo.mu.Unlock() + + if saInfo.tokenSource != nil { + if saInfo.clientID != c.ClientID || saInfo.clientSecret != c.ClientSecret || saInfo.baseURL != c.BaseURL { + return nil, fmt.Errorf("service account credentials changed") + } + return saInfo.tokenSource, nil + } + + 
conf := clientcredentials.NewConfig(c.ClientID, c.ClientSecret) + if c.BaseURL != "" { + baseURL := strings.TrimRight(c.BaseURL, "/") + conf.TokenURL = baseURL + clientcredentials.TokenAPIPath + conf.RevokeURL = baseURL + clientcredentials.RevokeAPIPath + } + ctx = context.WithValue(ctx, auth.HTTPClient, &http.Client{Transport: base}) + token, err := conf.TokenSource(ctx).Token() + if err != nil { + return nil, err + } + saInfo.clientID = c.ClientID + saInfo.clientSecret = c.ClientSecret + saInfo.baseURL = c.BaseURL + // TODO: token will be refreshed in a follow-up PR + saInfo.tokenSource = oauth2.StaticTokenSource(token) + return saInfo.tokenSource, nil +} diff --git a/internal/service/advancedcluster/main_test.go b/internal/service/advancedcluster/main_test.go index 109c76818b..53fef27c8a 100644 --- a/internal/service/advancedcluster/main_test.go +++ b/internal/service/advancedcluster/main_test.go @@ -8,18 +8,6 @@ import ( ) func TestMain(m *testing.M) { - // Only modify credentials for unit tests. Preserve GH Actions env in acceptance (TF_ACC=1). - if os.Getenv("TF_ACC") == "" { - // If no credentials are provided, force digest auth to satisfy provider validation. - _ = os.Setenv("MONGODB_ATLAS_PUBLIC_API_KEY", "dummy") - _ = os.Setenv("MONGODB_ATLAS_PRIVATE_API_KEY", "dummy") - // Ensure Service Account auth is not selected if we just set digest - _ = os.Unsetenv("MONGODB_ATLAS_CLIENT_ID") - _ = os.Unsetenv("MONGODB_ATLAS_CLIENT_SECRET") - _ = os.Unsetenv("TF_VAR_CLIENT_ID") - _ = os.Unsetenv("TF_VAR_CLIENT_SECRET") - } - cleanup := acc.SetupSharedResources() exitCode := m.Run() cleanup() diff --git a/internal/service/organization/resource_organization_test.go b/internal/service/organization/resource_organization_test.go index 6f900c6677..57ed0aece0 100644 --- a/internal/service/organization/resource_organization_test.go +++ b/internal/service/organization/resource_organization_test.go @@ -481,10 +481,10 @@ func checkAggr(orgOwnerID, name, description string, settings *admin.Organizatio "name": name, "org_owner_id": orgOwnerID, "description": description, - "api_access_list_required": strconv.FormatBool(*settings.ApiAccessListRequired), - "multi_factor_auth_required": strconv.FormatBool(*settings.MultiFactorAuthRequired), - "restrict_employee_access": strconv.FormatBool(*settings.RestrictEmployeeAccess), - "gen_ai_features_enabled": strconv.FormatBool(*settings.GenAIFeaturesEnabled), + "api_access_list_required": strconv.FormatBool(settings.GetApiAccessListRequired()), + "multi_factor_auth_required": strconv.FormatBool(settings.GetMultiFactorAuthRequired()), + "restrict_employee_access": strconv.FormatBool(settings.GetRestrictEmployeeAccess()), + "gen_ai_features_enabled": strconv.FormatBool(settings.GetGenAIFeaturesEnabled()), "security_contact": settings.GetSecurityContact(), } checks := []resource.TestCheckFunc{ diff --git a/internal/testutil/acc/factory.go b/internal/testutil/acc/factory.go index 5759e01a18..709c4bb89b 100644 --- a/internal/testutil/acc/factory.go +++ b/internal/testutil/acc/factory.go @@ -52,6 +52,12 @@ func ConnV2UsingGov() *admin.APIClient { } func init() { + if InUnitTest() { // Dummy credentials for unit tests + os.Setenv("MONGODB_ATLAS_PUBLIC_KEY", "dummy") + os.Setenv("MONGODB_ATLAS_PRIVATE_KEY", "dummy") + os.Unsetenv("MONGODB_ATLAS_CLIENT_ID") + os.Unsetenv("MONGODB_ATLAS_CLIENT_SECRET") + } TestAccProviderV6Factories = map[string]func() (tfprotov6.ProviderServer, error){ ProviderNameMongoDBAtlas: func() (tfprotov6.ProviderServer, error) { return 
provider.MuxProviderFactory()(), nil @@ -60,6 +66,8 @@ func init() { cfg := config.Config{ PublicKey: os.Getenv("MONGODB_ATLAS_PUBLIC_KEY"), PrivateKey: os.Getenv("MONGODB_ATLAS_PRIVATE_KEY"), + ClientID: os.Getenv("MONGODB_ATLAS_CLIENT_ID"), + ClientSecret: os.Getenv("MONGODB_ATLAS_CLIENT_SECRET"), BaseURL: os.Getenv("MONGODB_ATLAS_BASE_URL"), RealmBaseURL: os.Getenv("MONGODB_REALM_BASE_URL"), } diff --git a/internal/testutil/acc/pre_check.go b/internal/testutil/acc/pre_check.go index 41b830c66c..94bb2c01b0 100644 --- a/internal/testutil/acc/pre_check.go +++ b/internal/testutil/acc/pre_check.go @@ -11,10 +11,8 @@ import ( func PreCheckBasic(tb testing.TB) { tb.Helper() - if os.Getenv("MONGODB_ATLAS_PUBLIC_KEY") == "" || - os.Getenv("MONGODB_ATLAS_PRIVATE_KEY") == "" || - os.Getenv("MONGODB_ATLAS_ORG_ID") == "" { - tb.Fatal("`MONGODB_ATLAS_PUBLIC_KEY`, `MONGODB_ATLAS_PRIVATE_KEY`, and `MONGODB_ATLAS_ORG_ID` must be set for acceptance testing") + if os.Getenv("MONGODB_ATLAS_ORG_ID") == "" { + tb.Fatal("`MONGODB_ATLAS_ORG_ID` must be set for acceptance testing") } } diff --git a/internal/testutil/acc/skip.go b/internal/testutil/acc/skip.go index 29a89710f2..cd3fcb820d 100644 --- a/internal/testutil/acc/skip.go +++ b/internal/testutil/acc/skip.go @@ -2,7 +2,7 @@ package acc import ( "os" - "strings" + "strconv" "testing" ) @@ -15,7 +15,8 @@ func SkipTestForCI(tb testing.TB) { } func InCI() bool { - return strings.EqualFold(os.Getenv("CI"), "true") + val, _ := strconv.ParseBool(os.Getenv("CI")) + return val } // SkipInUnitTest allows skipping a test entirely when TF_ACC=1 is not defined. From c9dc4f0d0335f69fd046957c5af81ac1c38eb04c Mon Sep 17 00:00:00 2001 From: Oriol Date: Mon, 29 Sep 2025 09:02:41 +0200 Subject: [PATCH 3/8] feat: Adds support for JWT Token credentials as provider inputs, environment variables and AWS Secrets Manager (#3716) * support JWT token as authentication credentials * changelog * Update internal/testutil/acc/pre_check.go Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> * token has priority over SA and PAK * rename methods * mention SA in changelog --------- Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> --- .changelog/3716.txt | 3 ++ internal/config/client.go | 33 ++++++++++++--- internal/provider/credentials.go | 4 +- internal/provider/provider.go | 14 +++++++ .../provider/provider_authentication_test.go | 40 ++++++++++++++----- internal/provider/provider_sdk2.go | 16 ++++++++ internal/testutil/acc/pre_check.go | 16 +++++++- 7 files changed, 110 insertions(+), 16 deletions(-) create mode 100644 .changelog/3716.txt diff --git a/.changelog/3716.txt b/.changelog/3716.txt new file mode 100644 index 0000000000..f45fa9226e --- /dev/null +++ b/.changelog/3716.txt @@ -0,0 +1,3 @@ +```release-note:enhancement +provider: Supports Service Account JWT Token as credentials to authenticate the provider +``` diff --git a/internal/config/client.go b/internal/config/client.go index db611cde2a..d12e0ce172 100644 --- a/internal/config/client.go +++ b/internal/config/client.go @@ -26,6 +26,7 @@ import ( "github.com/mongodb/terraform-provider-mongodbatlas/version" "go.mongodb.org/atlas-sdk/v20250312007/auth" + "golang.org/x/oauth2" ) const ( @@ -45,6 +46,7 @@ type AuthMethod int const ( ServiceAccount AuthMethod = iota Digest + AccessToken Unknown ) @@ -54,21 +56,27 @@ type CredentialProvider interface { GetPrivateKey() string GetClientID() string GetClientSecret() string + GetAccessToken() string } // IsDigestAuth checks if public/private key 
credentials are present -func IsDigestAuth(cp CredentialProvider) bool { +func IsDigestAuthPresent(cp CredentialProvider) bool { return cp.GetPublicKey() != "" && cp.GetPrivateKey() != "" } // IsServiceAccountAuth checks if client ID/secret credentials are present -func IsServiceAccountAuth(cp CredentialProvider) bool { +func IsServiceAccountAuthPresent(cp CredentialProvider) bool { return cp.GetClientID() != "" && cp.GetClientSecret() != "" } +// IsAccessTokenAuth checks if access token credentials are present +func IsAccessTokenAuthPresent(cp CredentialProvider) bool { + return cp.GetAccessToken() != "" +} + // HasValidAuthCredentials checks if any valid authentication method is provided func HasValidAuthCredentials(cp CredentialProvider) bool { - return IsDigestAuth(cp) || IsServiceAccountAuth(cp) + return IsDigestAuthPresent(cp) || IsServiceAccountAuthPresent(cp) || IsAccessTokenAuthPresent(cp) } var baseTransport = &http.Transport{ @@ -104,6 +112,7 @@ type Config struct { TerraformVersion string ClientID string ClientSecret string + AccessToken string } // CredentialProvider implementation for Config @@ -111,6 +120,7 @@ func (c *Config) GetPublicKey() string { return c.PublicKey } func (c *Config) GetPrivateKey() string { return c.PrivateKey } func (c *Config) GetClientID() string { return c.ClientID } func (c *Config) GetClientSecret() string { return c.ClientSecret } +func (c *Config) GetAccessToken() string { return c.AccessToken } type AssumeRole struct { Tags map[string]string @@ -142,6 +152,16 @@ func (c *Config) NewClient(ctx context.Context) (any, error) { // Determine authentication method based on available credentials switch ResolveAuthMethod(c) { + case AccessToken: + // Use a static bearer token with oauth2 transport + tokenSource := oauth2.StaticTokenSource(&oauth2.Token{ + AccessToken: c.AccessToken, + TokenType: "Bearer", + }) + oauthClient := auth.NewClient(ctx, tokenSource) + tfLoggingTransport := logging.NewTransport("Atlas", oauthClient.Transport) + oauthClient.Transport = tfLoggingTransport + client = oauthClient case ServiceAccount: tokenSource, err := tokenSource(ctx, c, networkLoggingTransport) if err != nil { @@ -368,10 +388,13 @@ func userAgent(c *Config) string { // ResolveAuthMethod determines the authentication method from any credential provider func ResolveAuthMethod(cg CredentialProvider) AuthMethod { - if IsServiceAccountAuth(cg) { + if IsAccessTokenAuthPresent(cg) { + return AccessToken + } + if IsServiceAccountAuthPresent(cg) { return ServiceAccount } - if IsDigestAuth(cg) { + if IsDigestAuthPresent(cg) { return Digest } return Unknown diff --git a/internal/provider/credentials.go b/internal/provider/credentials.go index e945c1a9b6..e82a195e7b 100644 --- a/internal/provider/credentials.go +++ b/internal/provider/credentials.go @@ -76,6 +76,8 @@ func configureCredentialsSTS(cfg *config.Config, secret, region, awsAccessKeyID, } switch config.ResolveAuthMethod(&secretData) { + case config.AccessToken: + cfg.AccessToken = secretData.AccessToken case config.Digest: cfg.PublicKey = secretData.PublicKey cfg.PrivateKey = secretData.PrivateKey @@ -83,7 +85,7 @@ func configureCredentialsSTS(cfg *config.Config, secret, region, awsAccessKeyID, cfg.ClientID = secretData.ClientID cfg.ClientSecret = secretData.ClientSecret case config.Unknown: - return *cfg, fmt.Errorf("secret missing value for supported credentials: PrivateKey/PublicKey or ClientID/ClientSecret") + return *cfg, fmt.Errorf("secret missing value for supported credentials: PrivateKey/PublicKey, 
ClientID/ClientSecret or AccessToken") } return *cfg, nil diff --git a/internal/provider/provider.go b/internal/provider/provider.go index c0598abf93..9ec45dbca1 100644 --- a/internal/provider/provider.go +++ b/internal/provider/provider.go @@ -87,6 +87,7 @@ type tfMongodbAtlasProviderModel struct { AwsSessionToken types.String `tfsdk:"aws_session_token"` ClientID types.String `tfsdk:"client_id"` ClientSecret types.String `tfsdk:"client_secret"` + AccessToken types.String `tfsdk:"access_token"` IsMongodbGovCloud types.Bool `tfsdk:"is_mongodbgov_cloud"` } @@ -198,6 +199,10 @@ func (p *MongodbtlasProvider) Schema(ctx context.Context, req provider.SchemaReq Optional: true, Description: "MongoDB Atlas Client Secret for Service Account.", }, + "access_token": schema.StringAttribute{ + Optional: true, + Description: "MongoDB Atlas Access Token for Service Account.", + }, }, } } @@ -288,6 +293,7 @@ func (p *MongodbtlasProvider) Configure(ctx context.Context, req provider.Config TerraformVersion: req.TerraformVersion, ClientID: data.ClientID.ValueString(), ClientSecret: data.ClientSecret.ValueString(), + AccessToken: data.AccessToken.ValueString(), } var assumeRoles []tfAssumeRoleModel @@ -470,12 +476,20 @@ func setDefaultValuesWithValidations(ctx context.Context, data *tfMongodbAtlasPr }, "").(string)) } + if data.AccessToken.ValueString() == "" { + data.AccessToken = types.StringValue(MultiEnvDefaultFunc([]string{ + "MONGODB_ATLAS_OAUTH_TOKEN", + "TF_VAR_OAUTH_TOKEN", + }, "").(string)) + } + // Check if any valid authentication method is provided if !config.HasValidAuthCredentials(&config.Config{ PublicKey: data.PublicKey.ValueString(), PrivateKey: data.PrivateKey.ValueString(), ClientID: data.ClientID.ValueString(), ClientSecret: data.ClientSecret.ValueString(), + AccessToken: data.AccessToken.ValueString(), }) && !awsRoleDefined { resp.Diagnostics.AddError(ProviderConfigError, MissingAuthAttrError) } diff --git a/internal/provider/provider_authentication_test.go b/internal/provider/provider_authentication_test.go index f52b115139..dfb06e3932 100644 --- a/internal/provider/provider_authentication_test.go +++ b/internal/provider/provider_authentication_test.go @@ -16,7 +16,7 @@ func TestAccSTSAssumeRole_basic(t *testing.T) { projectName = acc.RandomProjectName() ) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acc.PreCheckSTSAssumeRole(t); acc.PreCheckRegularCredsAreEmpty(t) }, + PreCheck: func() { acc.PreCheckSTSAssumeRole(t); acc.PreCheckPAKCredsAreEmpty(t); acc.PreCheckSACredsAreEmpty(t) }, ProtoV6ProviderFactories: acc.TestAccProviderV6Factories, CheckDestroy: acc.CheckDestroyProject, Steps: []resource.TestStep{ @@ -42,16 +42,37 @@ func TestAccSTSAssumeRole_basic(t *testing.T) { func TestAccServiceAccount_basic(t *testing.T) { var ( - resourceName = "data.mongodbatlas_projects.test" + resourceName = "data.mongodbatlas_organization.test" + orgID = os.Getenv("MONGODB_ATLAS_ORG_ID") + ) + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acc.PreCheckServiceAccount(t); acc.PreCheckPAKCredsAreEmpty(t) }, + ProtoV6ProviderFactories: acc.TestAccProviderV6Factories, + Steps: []resource.TestStep{ + { + Config: configDataSourceOrg(orgID), + Check: resource.ComposeAggregateTestCheckFunc( + resource.TestCheckResourceAttrSet(resourceName, "org_id"), + ), + }, + }, + }) +} + +func TestAccAccessToken_basic(t *testing.T) { + acc.SkipTestForCI(t) // access token has a validity period of 1 hour, so it cannot be used in CI reliably + var ( + resourceName = 
"data.mongodbatlas_organization.test" + orgID = os.Getenv("MONGODB_ATLAS_ORG_ID") ) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acc.PreCheckServiceAccount(t); acc.PreCheckRegularCredsAreEmpty(t) }, + PreCheck: func() { acc.PreCheckAccessToken(t); acc.PreCheckPAKCredsAreEmpty(t); acc.PreCheckSACredsAreEmpty(t) }, ProtoV6ProviderFactories: acc.TestAccProviderV6Factories, Steps: []resource.TestStep{ { - Config: configDataSourceProject(), + Config: configDataSourceOrg(orgID), Check: resource.ComposeAggregateTestCheckFunc( - resource.TestCheckResourceAttrSet(resourceName, "results.#"), + resource.TestCheckResourceAttrSet(resourceName, "org_id"), ), }, }, @@ -67,9 +88,10 @@ func configProject(orgID, projectName string) string { `, orgID, projectName) } -func configDataSourceProject() string { - return ` - data "mongodbatlas_projects" "test" { +func configDataSourceOrg(orgID string) string { + return fmt.Sprintf(` + data "mongodbatlas_organization" "test" { + org_id = %[1]q } - ` + `, orgID) } diff --git a/internal/provider/provider_sdk2.go b/internal/provider/provider_sdk2.go index 337c591c1a..47a6df2997 100644 --- a/internal/provider/provider_sdk2.go +++ b/internal/provider/provider_sdk2.go @@ -61,6 +61,7 @@ type SecretData struct { PrivateKey string `json:"private_key"` ClientID string `json:"client_id"` ClientSecret string `json:"client_secret"` + AccessToken string `json:"access_token"` } // CredentialProvider implementation for SecretData @@ -68,6 +69,7 @@ func (s *SecretData) GetPublicKey() string { return s.PublicKey } func (s *SecretData) GetPrivateKey() string { return s.PrivateKey } func (s *SecretData) GetClientID() string { return s.ClientID } func (s *SecretData) GetClientSecret() string { return s.ClientSecret } +func (s *SecretData) GetAccessToken() string { return s.AccessToken } // NewSdkV2Provider returns the provider to be use by the code. func NewSdkV2Provider() *schema.Provider { @@ -140,6 +142,11 @@ func NewSdkV2Provider() *schema.Provider { Optional: true, Description: "MongoDB Atlas Client Secret for Service Account.", }, + "access_token": { + Type: schema.TypeString, + Optional: true, + Description: "MongoDB Atlas Access Token for Service Account.", + }, }, DataSourcesMap: getDataSourcesMap(), ResourcesMap: getResourcesMap(), @@ -303,6 +310,7 @@ func providerConfigure(provider *schema.Provider) func(ctx context.Context, d *s TerraformVersion: provider.TerraformVersion, ClientID: d.Get("client_id").(string), ClientSecret: d.Get("client_secret").(string), + AccessToken: d.Get("access_token").(string), } assumeRoleValue, ok := d.GetOk("assume_role") @@ -444,12 +452,20 @@ func setDefaultsAndValidations(d *schema.ResourceData) diag.Diagnostics { return append(diagnostics, diag.FromErr(err)...) } + if err := setValueFromConfigOrEnv(d, "access_token", []string{ + "MONGODB_ATLAS_OAUTH_TOKEN", + "TF_VAR_OAUTH_TOKEN", + }); err != nil { + return append(diagnostics, diag.FromErr(err)...) 
+ } + // Check if any valid authentication method is provided if !config.HasValidAuthCredentials(&config.Config{ PublicKey: d.Get("public_key").(string), PrivateKey: d.Get("private_key").(string), ClientID: d.Get("client_id").(string), ClientSecret: d.Get("client_secret").(string), + AccessToken: d.Get("access_token").(string), }) && !awsRoleDefined { diagnostics = append(diagnostics, diag.Diagnostic{Severity: diag.Error, Summary: MissingAuthAttrError}) } diff --git a/internal/testutil/acc/pre_check.go b/internal/testutil/acc/pre_check.go index 94bb2c01b0..8bd5aa47ad 100644 --- a/internal/testutil/acc/pre_check.go +++ b/internal/testutil/acc/pre_check.go @@ -267,13 +267,20 @@ func PreCheckAwsEnvPrivateLinkEndpointService(tb testing.TB) { } } -func PreCheckRegularCredsAreEmpty(tb testing.TB) { +func PreCheckPAKCredsAreEmpty(tb testing.TB) { tb.Helper() if os.Getenv("MONGODB_ATLAS_PUBLIC_KEY") != "" || os.Getenv("MONGODB_ATLAS_PRIVATE_KEY") != "" { tb.Fatal(`"MONGODB_ATLAS_PUBLIC_KEY" and "MONGODB_ATLAS_PRIVATE_KEY" are defined in this test and they should not.`) } } +func PreCheckSACredsAreEmpty(tb testing.TB) { + tb.Helper() + if os.Getenv("MONGODB_ATLAS_CLIENT_ID") != "" || os.Getenv("MONGODB_ATLAS_CLIENT_SECRET") != "" { + tb.Fatal(`"MONGODB_ATLAS_CLIENT_ID" and "MONGODB_ATLAS_CLIENT_SECRET" are defined in this test and they should not.`) + } +} + func PreCheckSTSAssumeRole(tb testing.TB) { tb.Helper() if os.Getenv("AWS_REGION") == "" { @@ -383,3 +390,10 @@ func PreCheckServiceAccount(tb testing.TB) { tb.Fatal("`MONGODB_ATLAS_CLIENT_ID`, `MONGODB_ATLAS_CLIENT_SECRET` must be set for Service Account acceptance testing") } } + +func PreCheckAccessToken(tb testing.TB) { + tb.Helper() + if os.Getenv("MONGODB_ATLAS_OAUTH_TOKEN") == "" { + tb.Fatal("`MONGODB_ATLAS_OAUTH_TOKEN` must be set for Atlas Access Token acceptance testing") + } +} From 00a7e12fc25aa293d1a04a94c4c2a49b8fd743dc Mon Sep 17 00:00:00 2001 From: Leo Antoli <430982+lantoli@users.noreply.github.com> Date: Tue, 30 Sep 2025 12:46:23 +0200 Subject: [PATCH 4/8] chore: Implement Service Account token refresh policy (#3712) --- .github/workflows/acceptance-tests-runner.yml | 31 +++++--- .github/workflows/acceptance-tests.yml | 13 +++- .github/workflows/test-suite.yml | 7 +- internal/config/client.go | 48 ++++++------ internal/config/service_account.go | 18 +++-- internal/config/transport.go | 5 +- internal/config/transport_test.go | 8 +- .../provider/provider_authentication_test.go | 11 ++- .../resource_project_migration_test.go | 1 + .../service/project/resource_project_test.go | 1 + internal/testutil/acc/pre_check.go | 77 +++++-------------- internal/testutil/acc/skip.go | 22 ++++++ 12 files changed, 132 insertions(+), 110 deletions(-) diff --git a/.github/workflows/acceptance-tests-runner.yml b/.github/workflows/acceptance-tests-runner.yml index 23855cd7fd..7b953cbdfb 100644 --- a/.github/workflows/acceptance-tests-runner.yml +++ b/.github/workflows/acceptance-tests-runner.yml @@ -31,6 +31,11 @@ on: required: false default: false + + use_sa: + description: "Run tests using Service Account instead of API Keys" + type: boolean + required: false mongodb_atlas_org_id: type: string required: true @@ -208,14 +213,17 @@ env: TF_ACC: 1 TF_LOG: ${{ vars.LOG_LEVEL }} ACCTEST_TIMEOUT: ${{ vars.ACCTEST_TIMEOUT }} - # Only Migration tests are run when a specific previous provider version is set - # If the name (regex) of the test is set, only that test is run - ACCTEST_REGEX_RUN: ${{ inputs.test_name || inputs.provider_version == '' && 
'^Test(Acc|Mig)' || '^TestMig' }} + # Only Migration tests are run when a specific previous provider version is set. + # Don't run migration tests if using Service Accounts because previous provider versions don't support SA yet. + # If the name (regex) of the test is set, only that test is run. + ACCTEST_REGEX_RUN: ${{ inputs.test_name || inputs.use_sa && '^TestAcc' || inputs.provider_version == '' && '^Test(Acc|Mig)' || '^TestMig' }} MONGODB_ATLAS_BASE_URL: ${{ inputs.mongodb_atlas_base_url }} MONGODB_REALM_BASE_URL: ${{ inputs.mongodb_realm_base_url }} MONGODB_ATLAS_ORG_ID: ${{ inputs.mongodb_atlas_org_id }} - MONGODB_ATLAS_PUBLIC_KEY: ${{ secrets.mongodb_atlas_public_key }} - MONGODB_ATLAS_PRIVATE_KEY: ${{ secrets.mongodb_atlas_private_key }} + MONGODB_ATLAS_PUBLIC_KEY: ${{ inputs.use_sa == false && secrets.mongodb_atlas_public_key || '' }} + MONGODB_ATLAS_PRIVATE_KEY: ${{ inputs.use_sa == false && secrets.mongodb_atlas_private_key || '' }} + MONGODB_ATLAS_CLIENT_ID: ${{ inputs.use_sa && secrets.mongodb_atlas_client_id || '' }} + MONGODB_ATLAS_CLIENT_SECRET: ${{ inputs.use_sa && secrets.mongodb_atlas_client_secret || '' }} MONGODB_ATLAS_PUBLIC_KEY_READ_ONLY: ${{ secrets.mongodb_atlas_public_key_read_only }} MONGODB_ATLAS_PRIVATE_KEY_READ_ONLY: ${{ secrets.mongodb_atlas_private_key_read_only }} MONGODB_ATLAS_GOV_PUBLIC_KEY: ${{ secrets.mongodb_atlas_gov_public_key }} @@ -413,7 +421,7 @@ jobs: advanced_cluster_tpf_mig_from_sdkv2: needs: [ change-detection, get-provider-version ] - if: ${{ inputs.reduced_tests == false && (needs.change-detection.outputs.advanced_cluster == 'true' || inputs.test_group == 'advanced_cluster') }} + if: ${{ inputs.reduced_tests == false && inputs.use_sa == false && (needs.change-detection.outputs.advanced_cluster == 'true' || inputs.test_group == 'advanced_cluster') }} runs-on: ubuntu-latest permissions: {} steps: @@ -438,7 +446,7 @@ jobs: advanced_cluster_tpf_mig_from_tpf_preview: needs: [ change-detection, get-provider-version ] - if: ${{ inputs.reduced_tests == false && (needs.change-detection.outputs.advanced_cluster == 'true' || inputs.test_group == 'advanced_cluster') }} + if: ${{ inputs.reduced_tests == false && inputs.use_sa == false && (needs.change-detection.outputs.advanced_cluster == 'true' || inputs.test_group == 'advanced_cluster') }} runs-on: ubuntu-latest permissions: {} steps: @@ -491,6 +499,8 @@ jobs: env: MONGODB_ATLAS_PUBLIC_KEY: "" MONGODB_ATLAS_PRIVATE_KEY: "" + MONGODB_ATLAS_CLIENT_ID: "" + MONGODB_ATLAS_CLIENT_SECRET: "" ASSUME_ROLE_ARN: ${{ vars.ASSUME_ROLE_ARN }} AWS_REGION: ${{ vars.AWS_REGION }} STS_ENDPOINT: ${{ vars.STS_ENDPOINT }} @@ -526,7 +536,7 @@ jobs: ./internal/service/maintenancewindow run: make testacc autogen: - needs: [ change-detection, get-provider-version ] + needs: [change-detection, get-provider-version] if: ${{ needs.change-detection.outputs.autogen == 'true' || inputs.test_group == 'autogen' }} runs-on: ubuntu-latest permissions: {} @@ -793,8 +803,9 @@ jobs: run: make testacc event_trigger: - needs: [ change-detection, get-provider-version ] - if: ${{ needs.change-detection.outputs.event_trigger == 'true' || inputs.test_group == 'event_trigger' }} + needs: [change-detection, get-provider-version] + # Realm SDK doesn't support SA + if: ${{ inputs.use_sa == false && (needs.change-detection.outputs.event_trigger == 'true' || inputs.test_group == 'event_trigger') }} runs-on: ubuntu-latest permissions: {} steps: diff --git a/.github/workflows/acceptance-tests.yml b/.github/workflows/acceptance-tests.yml index 
e30844f365..debcacca09 100644 --- a/.github/workflows/acceptance-tests.yml +++ b/.github/workflows/acceptance-tests.yml @@ -1,5 +1,5 @@ name: 'Acceptance Tests' -run-name: 'Acceptance Tests ${{ inputs.atlas_cloud_env }} ${{ inputs.test_group }}' +run-name: "Acceptance Tests ${{ inputs.atlas_cloud_env }} ${{ inputs.test_group }} ${{ inputs.use_sa && 'sa' || 'pak'}}" # Used for running acceptance tests, either triggered manually or called by other workflows. on: @@ -29,6 +29,11 @@ on: description: 'The branch, tag or SHA where tests will run, e.g. v1.14.0, empty for default branch' type: string required: false + use_sa: + description: "Run tests using Service Account instead of API Keys" + type: boolean + required: false + workflow_call: # workflow runs after Test Suite or code-health inputs: terraform_version: @@ -52,6 +57,11 @@ on: type: boolean required: false + use_sa: + description: "Run tests using Service Account instead of API Keys" + type: boolean + required: false + jobs: tests: name: tests-${{ inputs.terraform_version || 'latest' }}-${{ inputs.provider_version || 'latest' }}-${{ inputs.atlas_cloud_env || 'dev' }} @@ -105,6 +115,7 @@ jobs: test_group: ${{ inputs.test_group }} test_name: ${{ inputs.test_name }} reduced_tests: ${{ inputs.reduced_tests || false }} + use_sa: ${{ inputs.use_sa || false }} aws_region_federation: ${{ vars.AWS_REGION_FEDERATION }} mongodb_atlas_org_id: ${{ inputs.atlas_cloud_env == 'qa' && vars.MONGODB_ATLAS_ORG_ID_CLOUD_QA || vars.MONGODB_ATLAS_ORG_ID_CLOUD_DEV }} mongodb_atlas_base_url: ${{ inputs.atlas_cloud_env == 'qa' && vars.MONGODB_ATLAS_BASE_URL_QA || vars.MONGODB_ATLAS_BASE_URL }} diff --git a/.github/workflows/test-suite.yml b/.github/workflows/test-suite.yml index dcf3085aec..7897ac90c8 100644 --- a/.github/workflows/test-suite.yml +++ b/.github/workflows/test-suite.yml @@ -17,6 +17,10 @@ on: description: 'Send the Slack notification if any of the tests fail.' 
type: boolean default: false + use_sa: + description: "Run tests using Service Account instead of API Keys" + type: boolean + required: false workflow_call: inputs: terraform_matrix: @@ -70,13 +74,14 @@ jobs: matrix: terraform_version: ${{ fromJSON(needs.variables.outputs.terraform_matrix) }} provider_version: ${{ fromJSON(needs.variables.outputs.provider_matrix) }} - name: ${{ matrix.terraform_version || 'latest' }}-${{ matrix.provider_version || 'latest' }} + name: ${{ matrix.terraform_version || 'latest' }}-${{ matrix.provider_version || 'latest' }}-${{ inputs.use_sa && 'sa' || 'pak' }} secrets: inherit uses: ./.github/workflows/acceptance-tests.yml with: terraform_version: ${{ matrix.terraform_version }} provider_version: ${{ matrix.provider_version }} atlas_cloud_env: ${{ inputs.atlas_cloud_env || needs.variables.outputs.is_sun == 'true' && 'qa' || '' }} # Run against QA on Sundays + use_sa: ${{ inputs.use_sa || false }} clean-after: needs: tests if: ${{ !cancelled() }} diff --git a/internal/config/client.go b/internal/config/client.go index d12e0ce172..c44fb9bcd8 100644 --- a/internal/config/client.go +++ b/internal/config/client.go @@ -21,11 +21,9 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/logging" "github.com/mongodb-forks/digest" adminpreview "github.com/mongodb/atlas-sdk-go/admin" - "github.com/spf13/cast" "github.com/mongodb/terraform-provider-mongodbatlas/version" - "go.mongodb.org/atlas-sdk/v20250312007/auth" "golang.org/x/oauth2" ) @@ -91,6 +89,17 @@ var baseTransport = &http.Transport{ ExpectContinueTimeout: expectContinueTimeout, } +// networkLoggingBaseTransport should be used as a base for authentication transport so authentication requests can be logged. +func networkLoggingBaseTransport() http.RoundTripper { + return NewTransportWithNetworkLogging(baseTransport, logging.IsDebugOrHigher()) +} + +// tfLoggingInterceptor should wrap the authentication transport to add Terraform logging. +func tfLoggingInterceptor(base http.RoundTripper) http.RoundTripper { + // Don't change logging.NewTransport to NewSubsystemLoggingHTTPTransport until all resources are in TPF. + return logging.NewTransport("Atlas", base) +} + // MongoDBClient contains the mongodbatlas clients and configurations type MongoDBClient struct { Atlas *matlasClient.Client @@ -145,40 +154,31 @@ type UAMetadata struct { } func (c *Config) NewClient(ctx context.Context) (any, error) { - // Network Logging transport is before authentication transport so it can log authentication requests - networkLoggingTransport := NewTransportWithNetworkLogging(baseTransport, logging.IsDebugOrHigher()) - - var client *http.Client - - // Determine authentication method based on available credentials + transport := networkLoggingBaseTransport() switch ResolveAuthMethod(c) { case AccessToken: - // Use a static bearer token with oauth2 transport tokenSource := oauth2.StaticTokenSource(&oauth2.Token{ AccessToken: c.AccessToken, - TokenType: "Bearer", + TokenType: "Bearer", // Use a static bearer token with oauth2 transport. 
}) - oauthClient := auth.NewClient(ctx, tokenSource) - tfLoggingTransport := logging.NewTransport("Atlas", oauthClient.Transport) - oauthClient.Transport = tfLoggingTransport - client = oauthClient + transport = &oauth2.Transport{ + Source: tokenSource, + Base: networkLoggingBaseTransport(), + } case ServiceAccount: - tokenSource, err := tokenSource(ctx, c, networkLoggingTransport) + tokenSource, err := getTokenSource(c, networkLoggingBaseTransport()) if err != nil { return nil, err } - oauthClient := auth.NewClient(ctx, tokenSource) - // Don't change logging.NewTransport to NewSubsystemLoggingHTTPTransport until all resources are in TPF. - tfLoggingTransport := logging.NewTransport("Atlas", oauthClient.Transport) - oauthClient.Transport = tfLoggingTransport - client = oauthClient + transport = &oauth2.Transport{ + Source: tokenSource, + Base: networkLoggingBaseTransport(), + } case Digest: - digestTransport := digest.NewTransportWithHTTPRoundTripper(cast.ToString(c.PublicKey), cast.ToString(c.PrivateKey), networkLoggingTransport) - // Don't change logging.NewTransport to NewSubsystemLoggingHTTPTransport until all resources are in TPF. - tfLoggingTransport := logging.NewTransport("Atlas", digestTransport) - client = &http.Client{Transport: tfLoggingTransport} + transport = digest.NewTransportWithHTTPRoundTripper(c.PublicKey, c.PrivateKey, networkLoggingBaseTransport()) case Unknown: } + client := &http.Client{Transport: tfLoggingInterceptor(transport)} // Initialize the old SDK optsAtlas := []matlasClient.ClientOpt{matlasClient.SetUserAgent(userAgent(c))} diff --git a/internal/config/service_account.go b/internal/config/service_account.go index 399020ec88..6a214e9b17 100644 --- a/internal/config/service_account.go +++ b/internal/config/service_account.go @@ -6,12 +6,16 @@ import ( "net/http" "strings" "sync" + "time" "github.com/mongodb/atlas-sdk-go/auth" "github.com/mongodb/atlas-sdk-go/auth/clientcredentials" "golang.org/x/oauth2" ) +// Renew token if it expires within 10 minutes to avoid authentication errors during Atlas API calls. +const saTokenExpiryBuffer = 10 * time.Minute + var saInfo = struct { tokenSource auth.TokenSource clientID string @@ -20,11 +24,11 @@ var saInfo = struct { mu sync.Mutex }{} -func tokenSource(ctx context.Context, c *Config, base http.RoundTripper) (auth.TokenSource, error) { +func getTokenSource(c *Config, tokenRenewalBase http.RoundTripper) (auth.TokenSource, error) { saInfo.mu.Lock() defer saInfo.mu.Unlock() - if saInfo.tokenSource != nil { + if saInfo.tokenSource != nil { // Token source in cache. if saInfo.clientID != c.ClientID || saInfo.clientSecret != c.ClientSecret || saInfo.baseURL != c.BaseURL { return nil, fmt.Errorf("service account credentials changed") } @@ -37,15 +41,15 @@ func tokenSource(ctx context.Context, c *Config, base http.RoundTripper) (auth.T conf.TokenURL = baseURL + clientcredentials.TokenAPIPath conf.RevokeURL = baseURL + clientcredentials.RevokeAPIPath } - ctx = context.WithValue(ctx, auth.HTTPClient, &http.Client{Transport: base}) - token, err := conf.TokenSource(ctx).Token() - if err != nil { + // Use a new context to avoid "context canceled" errors as the token source is reused and can outlast the callee context. + ctx := context.WithValue(context.Background(), auth.HTTPClient, &http.Client{Transport: tokenRenewalBase}) + tokenSource := oauth2.ReuseTokenSourceWithExpiry(nil, conf.TokenSource(ctx), saTokenExpiryBuffer) + if _, err := tokenSource.Token(); err != nil { // Retrieve token to fail-fast if credentials are invalid. 
return nil, err } saInfo.clientID = c.ClientID saInfo.clientSecret = c.ClientSecret saInfo.baseURL = c.BaseURL - // TODO: token will be refreshed in a follow-up PR - saInfo.tokenSource = oauth2.StaticTokenSource(token) + saInfo.tokenSource = tokenSource return saInfo.tokenSource, nil } diff --git a/internal/config/transport.go b/internal/config/transport.go index 766b6a6360..100ad2872d 100644 --- a/internal/config/transport.go +++ b/internal/config/transport.go @@ -16,10 +16,7 @@ type NetworkLoggingTransport struct { // NewTransportWithNetworkLogging creates a new NetworkLoggingTransport that wraps // the provided transport with enhanced network logging capabilities. -func NewTransportWithNetworkLogging(transport http.RoundTripper, enabled bool) *NetworkLoggingTransport { - if transport == nil { - transport = http.DefaultTransport - } +func NewTransportWithNetworkLogging(transport http.RoundTripper, enabled bool) http.RoundTripper { return &NetworkLoggingTransport{ Transport: transport, Enabled: enabled, diff --git a/internal/config/transport_test.go b/internal/config/transport_test.go index e1374a4191..68c20461f0 100644 --- a/internal/config/transport_test.go +++ b/internal/config/transport_test.go @@ -160,9 +160,11 @@ func TestAccNetworkLogging(t *testing.T) { log.SetOutput(&logOutput) defer log.SetOutput(os.Stderr) cfg := &config.Config{ - PublicKey: os.Getenv("MONGODB_ATLAS_PUBLIC_KEY"), - PrivateKey: os.Getenv("MONGODB_ATLAS_PRIVATE_KEY"), - BaseURL: os.Getenv("MONGODB_ATLAS_BASE_URL"), + PublicKey: os.Getenv("MONGODB_ATLAS_PUBLIC_KEY"), + PrivateKey: os.Getenv("MONGODB_ATLAS_PRIVATE_KEY"), + ClientID: os.Getenv("MONGODB_ATLAS_CLIENT_ID"), + ClientSecret: os.Getenv("MONGODB_ATLAS_CLIENT_SECRET"), + BaseURL: os.Getenv("MONGODB_ATLAS_BASE_URL"), } clientInterface, err := cfg.NewClient(t.Context()) require.NoError(t, err) diff --git a/internal/provider/provider_authentication_test.go b/internal/provider/provider_authentication_test.go index dfb06e3932..6a8228ad2e 100644 --- a/internal/provider/provider_authentication_test.go +++ b/internal/provider/provider_authentication_test.go @@ -10,13 +10,15 @@ import ( ) func TestAccSTSAssumeRole_basic(t *testing.T) { + acc.SkipInPAK(t, "skipping as this test is for AWS credentials only") + acc.SkipInSA(t, "skipping as this test is for AWS credentials only") var ( resourceName = "mongodbatlas_project.test" orgID = os.Getenv("MONGODB_ATLAS_ORG_ID") projectName = acc.RandomProjectName() ) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acc.PreCheckSTSAssumeRole(t); acc.PreCheckPAKCredsAreEmpty(t); acc.PreCheckSACredsAreEmpty(t) }, + PreCheck: func() { acc.PreCheckSTSAssumeRole(t) }, ProtoV6ProviderFactories: acc.TestAccProviderV6Factories, CheckDestroy: acc.CheckDestroyProject, Steps: []resource.TestStep{ @@ -41,12 +43,13 @@ func TestAccSTSAssumeRole_basic(t *testing.T) { } func TestAccServiceAccount_basic(t *testing.T) { + acc.SkipInPAK(t, "skipping as this test is for SA only") var ( resourceName = "data.mongodbatlas_organization.test" orgID = os.Getenv("MONGODB_ATLAS_ORG_ID") ) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acc.PreCheckServiceAccount(t); acc.PreCheckPAKCredsAreEmpty(t) }, + PreCheck: func() { acc.PreCheckBasic(t) }, ProtoV6ProviderFactories: acc.TestAccProviderV6Factories, Steps: []resource.TestStep{ { @@ -61,12 +64,14 @@ func TestAccServiceAccount_basic(t *testing.T) { func TestAccAccessToken_basic(t *testing.T) { acc.SkipTestForCI(t) // access token has a validity period of 1 hour, so it cannot 
be used in CI reliably + acc.SkipInPAK(t, "skipping as this test is for Token credentials only") + acc.SkipInSA(t, "skipping as this test is for Token credentials only") var ( resourceName = "data.mongodbatlas_organization.test" orgID = os.Getenv("MONGODB_ATLAS_ORG_ID") ) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acc.PreCheckAccessToken(t); acc.PreCheckPAKCredsAreEmpty(t); acc.PreCheckSACredsAreEmpty(t) }, + PreCheck: func() { acc.PreCheckAccessToken(t) }, ProtoV6ProviderFactories: acc.TestAccProviderV6Factories, Steps: []resource.TestStep{ { diff --git a/internal/service/project/resource_project_migration_test.go b/internal/service/project/resource_project_migration_test.go index 8550d77566..3d5ab79c0b 100644 --- a/internal/service/project/resource_project_migration_test.go +++ b/internal/service/project/resource_project_migration_test.go @@ -153,6 +153,7 @@ func TestMigProject_withLimits(t *testing.T) { // based on bug report: https://github.com/mongodb/terraform-provider-mongodbatlas/issues/2263 func TestMigGovProject_regionUsageRestrictionsDefault(t *testing.T) { + acc.SkipInSA(t, "SA not supported in Gov tests yet") var ( orgID = os.Getenv("MONGODB_ATLAS_GOV_ORG_ID") projectName = acc.RandomProjectName() diff --git a/internal/service/project/resource_project_test.go b/internal/service/project/resource_project_test.go index 974ec3d34c..018f101456 100644 --- a/internal/service/project/resource_project_test.go +++ b/internal/service/project/resource_project_test.go @@ -638,6 +638,7 @@ func TestAccProject_basic(t *testing.T) { } func TestAccGovProject_withProjectOwner(t *testing.T) { + acc.SkipInSA(t, "SA not supported in Gov tests yet") var ( orgID = os.Getenv("MONGODB_ATLAS_GOV_ORG_ID") projectOwnerID = os.Getenv("MONGODB_ATLAS_GOV_PROJECT_OWNER_ID") diff --git a/internal/testutil/acc/pre_check.go b/internal/testutil/acc/pre_check.go index 8bd5aa47ad..653974237d 100644 --- a/internal/testutil/acc/pre_check.go +++ b/internal/testutil/acc/pre_check.go @@ -14,6 +14,12 @@ func PreCheckBasic(tb testing.TB) { if os.Getenv("MONGODB_ATLAS_ORG_ID") == "" { tb.Fatal("`MONGODB_ATLAS_ORG_ID` must be set for acceptance testing") } + if HasPAKCreds() && HasSACreds() { + tb.Fatal("PAK and SA credentials are defined in this test but only one should be set.") + } + if !HasPAKCreds() && !HasSACreds() { + tb.Fatal("No credentials are defined in this test, PAK or SA credentials should be set.") + } } // PreCheckBasicSleep is a helper function to call SerialSleep, see its help for more info. @@ -36,31 +42,25 @@ func PreCheckBasicSleep(tb testing.TB, clusterInfo *ClusterInfo, projectID, clus // Use PreCheckBasic instead. 
func PreCheck(tb testing.TB) { tb.Helper() - if os.Getenv("MONGODB_ATLAS_PUBLIC_KEY") == "" || - os.Getenv("MONGODB_ATLAS_PRIVATE_KEY") == "" || - os.Getenv("MONGODB_ATLAS_PROJECT_ID") == "" || - os.Getenv("MONGODB_ATLAS_ORG_ID") == "" { - tb.Fatal("`MONGODB_ATLAS_PUBLIC_KEY`, `MONGODB_ATLAS_PRIVATE_KEY`, `MONGODB_ATLAS_PROJECT_ID` and `MONGODB_ATLAS_ORG_ID` must be set for acceptance testing") + PreCheckBasic(tb) + if os.Getenv("MONGODB_ATLAS_PROJECT_ID") == "" { + tb.Fatal("`MONGODB_ATLAS_PROJECT_ID` must be set for acceptance testing") } } func PreCheckEncryptionAtRestPrivateEndpoint(tb testing.TB) { tb.Helper() - if os.Getenv("MONGODB_ATLAS_PUBLIC_KEY") == "" || - os.Getenv("MONGODB_ATLAS_PRIVATE_KEY") == "" || - os.Getenv("MONGODB_ATLAS_PROJECT_EAR_PE_ID") == "" || - os.Getenv("MONGODB_ATLAS_ORG_ID") == "" { - tb.Fatal("`MONGODB_ATLAS_PUBLIC_KEY`, `MONGODB_ATLAS_PRIVATE_KEY`, `MONGODB_ATLAS_PROJECT_EAR_PE_ID` and `MONGODB_ATLAS_ORG_ID` must be set for acceptance testing") + PreCheckBasic(tb) + if os.Getenv("MONGODB_ATLAS_PROJECT_EAR_PE_ID") == "" { + tb.Fatal("`MONGODB_ATLAS_PROJECT_EAR_PE_ID` must be set for acceptance testing") } } func PreCheckCert(tb testing.TB) { tb.Helper() - if os.Getenv("MONGODB_ATLAS_PUBLIC_KEY") == "" || - os.Getenv("MONGODB_ATLAS_PRIVATE_KEY") == "" || - os.Getenv("MONGODB_ATLAS_ORG_ID") == "" || - os.Getenv("CA_CERT") == "" { - tb.Fatal("`CA_CERT, MONGODB_ATLAS_PUBLIC_KEY`, `MONGODB_ATLAS_PRIVATE_KEY`, and `MONGODB_ATLAS_ORG_ID` must be set for acceptance testing") + PreCheckBasic(tb) + if os.Getenv("CA_CERT") == "" { + tb.Fatal("`CA_CERT` must be set for acceptance testing") } } @@ -267,42 +267,13 @@ func PreCheckAwsEnvPrivateLinkEndpointService(tb testing.TB) { } } -func PreCheckPAKCredsAreEmpty(tb testing.TB) { - tb.Helper() - if os.Getenv("MONGODB_ATLAS_PUBLIC_KEY") != "" || os.Getenv("MONGODB_ATLAS_PRIVATE_KEY") != "" { - tb.Fatal(`"MONGODB_ATLAS_PUBLIC_KEY" and "MONGODB_ATLAS_PRIVATE_KEY" are defined in this test and they should not.`) - } -} - -func PreCheckSACredsAreEmpty(tb testing.TB) { - tb.Helper() - if os.Getenv("MONGODB_ATLAS_CLIENT_ID") != "" || os.Getenv("MONGODB_ATLAS_CLIENT_SECRET") != "" { - tb.Fatal(`"MONGODB_ATLAS_CLIENT_ID" and "MONGODB_ATLAS_CLIENT_SECRET" are defined in this test and they should not.`) - } -} - func PreCheckSTSAssumeRole(tb testing.TB) { tb.Helper() - if os.Getenv("AWS_REGION") == "" { - tb.Fatal(`'AWS_REGION' must be set for acceptance testing with STS Assume Role.`) - } - if os.Getenv("STS_ENDPOINT") == "" { - tb.Fatal(`'STS_ENDPOINT' must be set for acceptance testing with STS Assume Role.`) - } - if os.Getenv("ASSUME_ROLE_ARN") == "" { - tb.Fatal(`'ASSUME_ROLE_ARN' must be set for acceptance testing with STS Assume Role.`) - } - if os.Getenv("AWS_ACCESS_KEY_ID") == "" { - tb.Fatal(`'AWS_ACCESS_KEY_ID' must be set for acceptance testing with STS Assume Role.`) - } - if os.Getenv("AWS_SECRET_ACCESS_KEY") == "" { - tb.Fatal(`'AWS_SECRET_ACCESS_KEY' must be set for acceptance testing with STS Assume Role.`) - } - if os.Getenv("AWS_SESSION_TOKEN") == "" { - tb.Fatal(`'AWS_SESSION_TOKEN' must be set for acceptance testing with STS Assume Role.`) - } - if os.Getenv("SECRET_NAME") == "" { - tb.Fatal(`'SECRET_NAME' must be set for acceptance testing with STS Assume Role.`) + envVars := []string{"AWS_REGION", "STS_ENDPOINT", "ASSUME_ROLE_ARN", "AWS_ACCESS_KEY_ID", "AWS_SECRET_ACCESS_KEY", "AWS_SESSION_TOKEN", "SECRET_NAME"} + for _, envVar := range envVars { + if os.Getenv(envVar) == "" { + tb.Fatalf("`%s` must be set for 
acceptance testing with STS Assume Role.", envVar) + } } } @@ -383,14 +354,6 @@ func PreCheckAwsMsk(tb testing.TB) { } } -func PreCheckServiceAccount(tb testing.TB) { - tb.Helper() - if os.Getenv("MONGODB_ATLAS_CLIENT_ID") == "" || - os.Getenv("MONGODB_ATLAS_CLIENT_SECRET") == "" { - tb.Fatal("`MONGODB_ATLAS_CLIENT_ID`, `MONGODB_ATLAS_CLIENT_SECRET` must be set for Service Account acceptance testing") - } -} - func PreCheckAccessToken(tb testing.TB) { tb.Helper() if os.Getenv("MONGODB_ATLAS_OAUTH_TOKEN") == "" { diff --git a/internal/testutil/acc/skip.go b/internal/testutil/acc/skip.go index cd3fcb820d..8e3db37f0e 100644 --- a/internal/testutil/acc/skip.go +++ b/internal/testutil/acc/skip.go @@ -31,3 +31,25 @@ func SkipInUnitTest(tb testing.TB) { func InUnitTest() bool { return os.Getenv("TF_ACC") == "" } + +func HasPAKCreds() bool { + return os.Getenv("MONGODB_ATLAS_PUBLIC_KEY") != "" || os.Getenv("MONGODB_ATLAS_PRIVATE_KEY") != "" +} + +func HasSACreds() bool { + return os.Getenv("MONGODB_ATLAS_CLIENT_ID") != "" || os.Getenv("MONGODB_ATLAS_CLIENT_SECRET") != "" +} + +func SkipInSA(tb testing.TB, description string) { + tb.Helper() + if HasSACreds() { + tb.Skip(description) + } +} + +func SkipInPAK(tb testing.TB, description string) { + tb.Helper() + if HasPAKCreds() { + tb.Skip(description) + } +} From 394b811dbe72bda977f65e2e419b028d074a73f3 Mon Sep 17 00:00:00 2001 From: Leo Antoli <430982+lantoli@users.noreply.github.com> Date: Fri, 3 Oct 2025 11:10:13 +0200 Subject: [PATCH 5/8] chore: Fix SA dev branch merge (#3744) --- .github/workflows/acceptance-tests-runner.yml | 8 +++++--- Makefile | 9 +++++++-- 2 files changed, 12 insertions(+), 5 deletions(-) diff --git a/.github/workflows/acceptance-tests-runner.yml b/.github/workflows/acceptance-tests-runner.yml index 9f38d7b051..3fd5fcaa57 100644 --- a/.github/workflows/acceptance-tests-runner.yml +++ b/.github/workflows/acceptance-tests-runner.yml @@ -196,9 +196,10 @@ env: TF_ACC: 1 TF_LOG: ${{ vars.LOG_LEVEL }} ACCTEST_TIMEOUT: ${{ vars.ACCTEST_TIMEOUT }} - # Only Migration tests are run when a specific previous provider version is set - # If the name (regex) of the test is set, only that test is run - ACCTEST_REGEX_RUN: ${{ inputs.test_name || inputs.provider_version == '' && '^Test(Acc|Mig)' || '^TestMig' }} + # If the name (regex) of the test is set, only that test is run. + # Don't run migration tests if using Service Accounts because previous provider versions don't support SA yet. + # Only Migration tests are run when a specific previous provider version is set. + ACCTEST_REGEX_RUN: ${{ inputs.test_name || inputs.use_sa && '^TestAcc' || inputs.provider_version == '' && '^Test(Acc|Mig)' || '^TestMig' }} MONGODB_ATLAS_BASE_URL: ${{ inputs.mongodb_atlas_base_url }} MONGODB_REALM_BASE_URL: ${{ inputs.mongodb_realm_base_url }} MONGODB_ATLAS_ORG_ID: ${{ inputs.mongodb_atlas_org_id }} @@ -548,6 +549,7 @@ jobs: MONGODB_ATLAS_CLIENT_ID: ${{ secrets.mongodb_atlas_client_id }} MONGODB_ATLAS_CLIENT_SECRET: ${{ secrets.mongodb_atlas_client_secret }} MONGODB_ATLAS_LAST_VERSION: ${{ needs.get-provider-version.outputs.provider_version }} + ACCTEST_REGEX_RUN: '^TestAcc' # Don't run migration tests because previous provider versions don't support SA. 
ACCTEST_PACKAGES: | ./internal/service/alertconfiguration ./internal/service/databaseuser diff --git a/Makefile b/Makefile index a475f38f88..baad669d54 100644 --- a/Makefile +++ b/Makefile @@ -43,11 +43,16 @@ test: fmtcheck ## Run unit tests @$(eval export MONGODB_ATLAS_ORG_ID?=111111111111111111111111) @$(eval export MONGODB_ATLAS_PROJECT_ID?=111111111111111111111111) @$(eval export MONGODB_ATLAS_CLUSTER_NAME?=mocked-cluster) + @$(eval export MONGODB_ATLAS_PUBLIC_KEY=dummy) + @$(eval export MONGODB_ATLAS_PRIVATE_KEY=dummy) + @$(eval export MONGODB_ATLAS_CLIENT_ID=) + @$(eval export MONGODB_ATLAS_CLIENT_SECRET=) + @$(eval export MONGODB_ATLAS_ACCESS_TOKEN=) go test ./... -timeout=120s -parallel=$(PARALLEL_GO_TEST) -race .PHONY: testmact testmact: ## Run MacT tests (mocked acc tests) - @$(eval ACCTEST_REGEX_RUN?=^TestAccMockable) + @$(eval export ACCTEST_REGEX_RUN?=^TestAccMockable) @$(eval export HTTP_MOCKER_REPLAY?=true) @$(eval export HTTP_MOCKER_CAPTURE?=false) @$(eval export MONGODB_ATLAS_ORG_ID?=111111111111111111111111) @@ -72,7 +77,7 @@ testmact-capture: ## Capture HTTP traffic for MacT tests .PHONY: testacc testacc: fmtcheck ## Run acc & mig tests (acceptance & migration tests) - @$(eval ACCTEST_REGEX_RUN?=^TestAcc) + @$(eval export ACCTEST_REGEX_RUN?=^TestAcc) TF_ACC=1 go test $(ACCTEST_PACKAGES) -run '$(ACCTEST_REGEX_RUN)' -v -parallel $(PARALLEL_GO_TEST) $(TESTARGS) -timeout $(ACCTEST_TIMEOUT) -ldflags="$(LINKER_FLAGS)" .PHONY: testaccgov From 5d09ca0b22b8e0f246612335b83c54b2a090d529 Mon Sep 17 00:00:00 2001 From: Leo Antoli <430982+lantoli@users.noreply.github.com> Date: Mon, 6 Oct 2025 15:11:18 +0200 Subject: [PATCH 6/8] chore: Implement credential type hierarchy (#3738) --- .changelog/3738.txt | 3 + internal/config/client.go | 307 +++++------ internal/config/credentials.go | 185 +++++++ internal/config/credentials_test.go | 504 ++++++++++++++++++ internal/config/service_account.go | 16 +- internal/config/transport_test.go | 6 +- internal/provider/aws_credentials.go | 53 +- internal/provider/provider.go | 279 ++-------- internal/provider/provider_sdk2.go | 270 ++-------- .../eventtrigger/data_source_event_trigger.go | 2 +- .../data_source_event_triggers.go | 3 +- .../eventtrigger/resource_event_trigger.go | 11 +- .../resource_event_trigger_test.go | 4 +- .../organization/resource_organization.go | 31 +- .../resource_organization_test.go | 10 +- internal/testutil/acc/factory.go | 9 +- internal/testutil/acc/pre_check.go | 4 +- 17 files changed, 982 insertions(+), 715 deletions(-) create mode 100644 .changelog/3738.txt create mode 100644 internal/config/credentials.go create mode 100644 internal/config/credentials_test.go diff --git a/.changelog/3738.txt b/.changelog/3738.txt new file mode 100644 index 0000000000..2c423e83a5 --- /dev/null +++ b/.changelog/3738.txt @@ -0,0 +1,3 @@ +```release-note:bug +provider: Enforces strict hierarchy when selecting the credential source such as AWS Secrets Manager, provider attributes, or environment variables to prevent combining with values from different sources +``` diff --git a/internal/config/client.go b/internal/config/client.go index 4c1b09b0f9..574eb579a0 100644 --- a/internal/config/client.go +++ b/internal/config/client.go @@ -17,13 +17,14 @@ import ( matlasClient "go.mongodb.org/atlas/mongodbatlas" realmAuth "go.mongodb.org/realm/auth" "go.mongodb.org/realm/realm" - "golang.org/x/oauth2" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/logging" "github.com/mongodb-forks/digest" adminpreview 
"github.com/mongodb/atlas-sdk-go/admin" "github.com/mongodb/terraform-provider-mongodbatlas/version" + + "golang.org/x/oauth2" ) const ( @@ -41,41 +42,12 @@ const ( type AuthMethod int const ( - ServiceAccount AuthMethod = iota - Digest + Unknown AuthMethod = iota AccessToken - Unknown + ServiceAccount + Digest ) -// CredentialProvider interface for types that can provide MongoDB Atlas credentials -type CredentialProvider interface { - GetPublicKey() string - GetPrivateKey() string - GetClientID() string - GetClientSecret() string - GetAccessToken() string -} - -// IsDigestAuth checks if public/private key credentials are present -func IsDigestAuthPresent(cp CredentialProvider) bool { - return cp.GetPublicKey() != "" && cp.GetPrivateKey() != "" -} - -// IsServiceAccountAuth checks if client ID/secret credentials are present -func IsServiceAccountAuthPresent(cp CredentialProvider) bool { - return cp.GetClientID() != "" && cp.GetClientSecret() != "" -} - -// IsAccessTokenAuth checks if access token credentials are present -func IsAccessTokenAuthPresent(cp CredentialProvider) bool { - return cp.GetAccessToken() != "" -} - -// HasValidAuthCredentials checks if any valid authentication method is provided -func HasValidAuthCredentials(cp CredentialProvider) bool { - return IsDigestAuthPresent(cp) || IsServiceAccountAuthPresent(cp) || IsAccessTokenAuthPresent(cp) -} - var baseTransport = &http.Transport{ DialContext: (&net.Dialer{ Timeout: timeout, @@ -101,74 +73,33 @@ func tfLoggingInterceptor(base http.RoundTripper) http.RoundTripper { // MongoDBClient contains the mongodbatlas clients and configurations type MongoDBClient struct { - Atlas *matlasClient.Client - AtlasV2 *admin.APIClient - AtlasPreview *adminpreview.APIClient - AtlasV220240805 *admin20240805.APIClient // used in advanced_cluster to avoid adopting 2024-10-23 release with ISS autoscaling - AtlasV220240530 *admin20240530.APIClient // used in advanced_cluster and cloud_backup_schedule for avoiding breaking changes (supporting deprecated replication_specs.id) - AtlasV220241113 *admin20241113.APIClient // used in teams and atlas_users to avoiding breaking changes - Config *Config -} - -// Config contains the configurations needed to use SDKs -type Config struct { - AssumeRoleARN string - PublicKey string - PrivateKey string - BaseURL string - RealmBaseURL string - TerraformVersion string - ClientID string - ClientSecret string - AccessToken string -} - -// CredentialProvider implementation for Config -func (c *Config) GetPublicKey() string { return c.PublicKey } -func (c *Config) GetPrivateKey() string { return c.PrivateKey } -func (c *Config) GetClientID() string { return c.ClientID } -func (c *Config) GetClientSecret() string { return c.ClientSecret } -func (c *Config) GetAccessToken() string { return c.AccessToken } - -type SecretData struct { - PublicKey string `json:"public_key"` - PrivateKey string `json:"private_key"` + Atlas *matlasClient.Client + AtlasV2 *admin.APIClient + AtlasPreview *adminpreview.APIClient + AtlasV220240805 *admin20240805.APIClient // used in advanced_cluster to avoid adopting 2024-10-23 release with ISS autoscaling + AtlasV220240530 *admin20240530.APIClient // used in advanced_cluster and cloud_backup_schedule for avoiding breaking changes (supporting deprecated replication_specs.id) + AtlasV220241113 *admin20241113.APIClient // used in teams and atlas_users to avoiding breaking changes + Realm *RealmClient + BaseURL string // needed by organization resource + TerraformVersion string // needed by 
organization resource } -type UAMetadata struct { - Name string - Value string +type RealmClient struct { + publicKey string + privateKey string + realmBaseURL string + terraformVersion string } -func (c *Config) NewClient(ctx context.Context) (*MongoDBClient, error) { - transport := networkLoggingBaseTransport() - switch ResolveAuthMethod(c) { - case AccessToken: - tokenSource := oauth2.StaticTokenSource(&oauth2.Token{ - AccessToken: c.AccessToken, - TokenType: "Bearer", // Use a static bearer token with oauth2 transport. - }) - transport = &oauth2.Transport{ - Source: tokenSource, - Base: networkLoggingBaseTransport(), - } - case ServiceAccount: - tokenSource, err := getTokenSource(c, networkLoggingBaseTransport()) - if err != nil { - return nil, err - } - transport = &oauth2.Transport{ - Source: tokenSource, - Base: networkLoggingBaseTransport(), - } - case Digest: - transport = digest.NewTransportWithHTTPRoundTripper(c.PublicKey, c.PrivateKey, networkLoggingBaseTransport()) - case Unknown: +func NewClient(c *Credentials, terraformVersion string) (*MongoDBClient, error) { + userAgent := userAgent(terraformVersion) + client, err := getHTTPClient(c) + if err != nil { + return nil, err } - client := &http.Client{Transport: tfLoggingInterceptor(transport)} // Initialize the old SDK - optsAtlas := []matlasClient.ClientOpt{matlasClient.SetUserAgent(userAgent(c))} + optsAtlas := []matlasClient.ClientOpt{matlasClient.SetUserAgent(userAgent)} if c.BaseURL != "" { optsAtlas = append(optsAtlas, matlasClient.SetBaseURL(c.BaseURL)) } @@ -178,124 +109,137 @@ func (c *Config) NewClient(ctx context.Context) (*MongoDBClient, error) { } // Initialize the new SDK for different versions - sdkV2Client, err := c.newSDKV2Client(client) + sdkV2Client, err := newSDKV2Client(client, c.BaseURL, userAgent) if err != nil { return nil, err } - sdkPreviewClient, err := c.newSDKPreviewClient(client) + sdkPreviewClient, err := newSDKPreviewClient(client, c.BaseURL, userAgent) if err != nil { return nil, err } - sdkV220240530Client, err := c.newSDKV220240530Client(client) + sdkV220240530Client, err := newSDKV220240530Client(client, c.BaseURL, userAgent) if err != nil { return nil, err } - sdkV220240805Client, err := c.newSDKV220240805Client(client) + sdkV220240805Client, err := newSDKV220240805Client(client, c.BaseURL, userAgent) if err != nil { return nil, err } - sdkV220241113Client, err := c.newSDKV220241113Client(client) + sdkV220241113Client, err := newSDKV220241113Client(client, c.BaseURL, userAgent) if err != nil { return nil, err } + clients := &MongoDBClient{ - Atlas: atlasClient, - AtlasV2: sdkV2Client, - AtlasPreview: sdkPreviewClient, - AtlasV220240530: sdkV220240530Client, - AtlasV220240805: sdkV220240805Client, - AtlasV220241113: sdkV220241113Client, - Config: c, + Atlas: atlasClient, + AtlasV2: sdkV2Client, + AtlasPreview: sdkPreviewClient, + AtlasV220240530: sdkV220240530Client, + AtlasV220240805: sdkV220240805Client, + AtlasV220241113: sdkV220241113Client, + BaseURL: c.BaseURL, + TerraformVersion: terraformVersion, + Realm: &RealmClient{ + publicKey: c.PublicKey, + privateKey: c.PrivateKey, + realmBaseURL: c.RealmBaseURL, + terraformVersion: terraformVersion, + }, } return clients, nil } -func (c *Config) newSDKV2Client(client *http.Client) (*admin.APIClient, error) { - opts := []admin.ClientModifier{ - admin.UseHTTPClient(client), - admin.UseUserAgent(userAgent(c)), - admin.UseBaseURL(c.BaseURL), - admin.UseDebug(false)} - - sdk, err := admin.NewClient(opts...) 
- if err != nil { - return nil, err +func getHTTPClient(c *Credentials) (*http.Client, error) { + transport := networkLoggingBaseTransport() + switch c.AuthMethod() { + case AccessToken: + tokenSource := oauth2.StaticTokenSource(&oauth2.Token{ + AccessToken: c.AccessToken, + TokenType: "Bearer", // Use a static bearer token with oauth2 transport. + }) + transport = &oauth2.Transport{ + Source: tokenSource, + Base: networkLoggingBaseTransport(), + } + case ServiceAccount: + tokenSource, err := getTokenSource(c.ClientID, c.ClientSecret, c.BaseURL, networkLoggingBaseTransport()) + if err != nil { + return nil, err + } + transport = &oauth2.Transport{ + Source: tokenSource, + Base: networkLoggingBaseTransport(), + } + case Digest: + transport = digest.NewTransportWithHTTPRoundTripper(c.PublicKey, c.PrivateKey, networkLoggingBaseTransport()) + case Unknown: } - return sdk, nil + return &http.Client{Transport: tfLoggingInterceptor(transport)}, nil } -func (c *Config) newSDKPreviewClient(client *http.Client) (*adminpreview.APIClient, error) { - opts := []adminpreview.ClientModifier{ - adminpreview.UseHTTPClient(client), - adminpreview.UseUserAgent(userAgent(c)), - adminpreview.UseBaseURL(c.BaseURL), - adminpreview.UseDebug(false)} +func newSDKV2Client(client *http.Client, baseURL, userAgent string) (*admin.APIClient, error) { + return admin.NewClient( + admin.UseHTTPClient(client), + admin.UseUserAgent(userAgent), + admin.UseBaseURL(baseURL), + admin.UseDebug(false), + ) +} - sdk, err := adminpreview.NewClient(opts...) - if err != nil { - return nil, err - } - return sdk, nil +func newSDKPreviewClient(client *http.Client, baseURL, userAgent string) (*adminpreview.APIClient, error) { + return adminpreview.NewClient( + adminpreview.UseHTTPClient(client), + adminpreview.UseUserAgent(userAgent), + adminpreview.UseBaseURL(baseURL), + adminpreview.UseDebug(false), + ) } -func (c *Config) newSDKV220240530Client(client *http.Client) (*admin20240530.APIClient, error) { - opts := []admin20240530.ClientModifier{ +func newSDKV220240530Client(client *http.Client, baseURL, userAgent string) (*admin20240530.APIClient, error) { + return admin20240530.NewClient( admin20240530.UseHTTPClient(client), - admin20240530.UseUserAgent(userAgent(c)), - admin20240530.UseBaseURL(c.BaseURL), - admin20240530.UseDebug(false)} - - sdk, err := admin20240530.NewClient(opts...) - if err != nil { - return nil, err - } - return sdk, nil + admin20240530.UseUserAgent(userAgent), + admin20240530.UseBaseURL(baseURL), + admin20240530.UseDebug(false), + ) } -func (c *Config) newSDKV220240805Client(client *http.Client) (*admin20240805.APIClient, error) { - opts := []admin20240805.ClientModifier{ +func newSDKV220240805Client(client *http.Client, baseURL, userAgent string) (*admin20240805.APIClient, error) { + return admin20240805.NewClient( admin20240805.UseHTTPClient(client), - admin20240805.UseUserAgent(userAgent(c)), - admin20240805.UseBaseURL(c.BaseURL), - admin20240805.UseDebug(false)} - - sdk, err := admin20240805.NewClient(opts...) 
- if err != nil { - return nil, err - } - return sdk, nil + admin20240805.UseUserAgent(userAgent), + admin20240805.UseBaseURL(baseURL), + admin20240805.UseDebug(false), + ) } -func (c *Config) newSDKV220241113Client(client *http.Client) (*admin20241113.APIClient, error) { - opts := []admin20241113.ClientModifier{ +func newSDKV220241113Client(client *http.Client, baseURL, userAgent string) (*admin20241113.APIClient, error) { + return admin20241113.NewClient( admin20241113.UseHTTPClient(client), - admin20241113.UseUserAgent(userAgent(c)), - admin20241113.UseBaseURL(c.BaseURL), - admin20241113.UseDebug(false)} - - sdk, err := admin20241113.NewClient(opts...) - if err != nil { - return nil, err - } - return sdk, nil + admin20241113.UseUserAgent(userAgent), + admin20241113.UseBaseURL(baseURL), + admin20241113.UseDebug(false), + ) } -func (c *MongoDBClient) GetRealmClient(ctx context.Context) (*realm.Client, error) { - // Realm - if c.Config.PublicKey == "" && c.Config.PrivateKey == "" { +// Get in RealmClient is a method instead of Atlas fields so it's lazy initialized as it needs a roundtrip to authenticate. +func (r *RealmClient) Get(ctx context.Context) (*realm.Client, error) { + if r.publicKey == "" && r.privateKey == "" { return nil, errors.New("please set `public_key` and `private_key` in order to use the realm client") } - optsRealm := []realm.ClientOpt{realm.SetUserAgent(userAgent(c.Config))} + optsRealm := []realm.ClientOpt{ + realm.SetUserAgent(userAgent(r.terraformVersion)), + } authConfig := realmAuth.NewConfig(nil) - if c.Config.BaseURL != "" && c.Config.RealmBaseURL != "" { - adminURL := c.Config.RealmBaseURL + "api/admin/v3.0/" + if r.realmBaseURL != "" { + adminURL := r.realmBaseURL + "api/admin/v3.0/" optsRealm = append(optsRealm, realm.SetBaseURL(adminURL)) authConfig.AuthURL, _ = url.Parse(adminURL + "auth/providers/mongodb-cloud/login") } - token, err := authConfig.NewTokenFromCredentials(ctx, c.Config.PublicKey, c.Config.PrivateKey) + token, err := authConfig.NewTokenFromCredentials(ctx, r.publicKey, r.privateKey) if err != nil { return nil, err } @@ -359,30 +303,21 @@ func (c *MongoDBClient) UntypedAPICall(ctx context.Context, params *APICallParam return apiResp, err } -func userAgent(c *Config) string { - metadata := []UAMetadata{ +func userAgent(terraformVersion string) string { + metadata := []struct { + Name string + Value string + }{ {toolName, version.ProviderVersion}, - {terraformPlatformName, c.TerraformVersion}, + {terraformPlatformName, terraformVersion}, } var parts []string for _, info := range metadata { + if info.Value == "" { + continue + } part := fmt.Sprintf("%s/%s", info.Name, info.Value) parts = append(parts, part) } - return strings.Join(parts, " ") } - -// ResolveAuthMethod determines the authentication method from any credential provider -func ResolveAuthMethod(cg CredentialProvider) AuthMethod { - if IsAccessTokenAuthPresent(cg) { - return AccessToken - } - if IsServiceAccountAuthPresent(cg) { - return ServiceAccount - } - if IsDigestAuthPresent(cg) { - return Digest - } - return Unknown -} diff --git a/internal/config/credentials.go b/internal/config/credentials.go new file mode 100644 index 0000000000..14adc7bdb4 --- /dev/null +++ b/internal/config/credentials.go @@ -0,0 +1,185 @@ +package config + +import ( + "os" + + "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" +) + +// Credentials has all the authentication fields, it also matches with fields that can be stored in AWS Secrets Manager. 
+type Credentials struct { + AccessToken string `json:"access_token"` + ClientID string `json:"client_id"` + ClientSecret string `json:"client_secret"` + PublicKey string `json:"public_key"` + PrivateKey string `json:"private_key"` + BaseURL string `json:"base_url"` + RealmBaseURL string `json:"realm_base_url"` +} + +// GetCredentials follows the order of AWS Secrets Manager, provider vars and env vars. +func GetCredentials(providerVars, envVars *Vars, getAWSCredentials func(*AWSVars) (*Credentials, error)) (*Credentials, error) { + if awsVars := CoalesceAWSVars(providerVars.GetAWS(), envVars.GetAWS()); awsVars != nil { + awsCredentials, err := getAWSCredentials(awsVars) + if err != nil { + return nil, err + } + return awsCredentials, nil + } + if c := CoalesceCredentials(providerVars.GetCredentials(), envVars.GetCredentials()); c != nil { + return c, nil + } + return &Credentials{}, nil +} + +// AuthMethod follows the order of token, SA and PAK. +func (c *Credentials) AuthMethod() AuthMethod { + switch { + case c.HasAccessToken(): + return AccessToken + case c.HasServiceAccount(): + return ServiceAccount + case c.HasDigest(): + return Digest + default: + return Unknown + } +} + +func (c *Credentials) HasAccessToken() bool { + return c.AccessToken != "" +} + +func (c *Credentials) HasServiceAccount() bool { + return c.ClientID != "" || c.ClientSecret != "" +} + +func (c *Credentials) HasDigest() bool { + return c.PublicKey != "" || c.PrivateKey != "" +} + +func (c *Credentials) IsPresent() bool { + return c.AuthMethod() != Unknown +} + +func (c *Credentials) Warnings() string { + if !c.IsPresent() { + return "No credentials set" + } + // Prefer specific checks over generic code as there are few combinations and code is clearer. + if c.HasAccessToken() && c.HasServiceAccount() && c.HasDigest() { + return "Access Token will be used although Service Account and API Keys are also set" + } + if c.HasAccessToken() && c.HasServiceAccount() { + return "Access Token will be used although Service Account is also set" + } + if c.HasAccessToken() && c.HasDigest() { + return "Access Token will be used although API Keys is also set" + } + if c.HasServiceAccount() && c.HasDigest() { + return "Service Account will be used although API Keys is also set" + } + return "" +} + +type AWSVars struct { + AssumeRoleARN string + SecretName string + Region string + AccessKeyID string + SecretAccessKey string + SessionToken string + Endpoint string +} + +func (a *AWSVars) IsPresent() bool { + return a.AssumeRoleARN != "" +} + +type Vars struct { + AccessToken string + ClientID string + ClientSecret string + PublicKey string + PrivateKey string + BaseURL string + RealmBaseURL string + AWSAssumeRoleARN string + AWSSecretName string + AWSRegion string + AWSAccessKeyID string + AWSSecretAccessKey string + AWSSessionToken string + AWSEndpoint string +} + +func NewEnvVars() *Vars { + return &Vars{ + AccessToken: getEnv("MONGODB_ATLAS_ACCESS_TOKEN", "TF_VAR_ACCESS_TOKEN"), + ClientID: getEnv("MONGODB_ATLAS_CLIENT_ID", "TF_VAR_CLIENT_ID"), + ClientSecret: getEnv("MONGODB_ATLAS_CLIENT_SECRET", "TF_VAR_CLIENT_SECRET"), + PublicKey: getEnv("MONGODB_ATLAS_PUBLIC_API_KEY", "MONGODB_ATLAS_PUBLIC_KEY", "MCLI_PUBLIC_API_KEY"), + PrivateKey: getEnv("MONGODB_ATLAS_PRIVATE_API_KEY", "MONGODB_ATLAS_PRIVATE_KEY", "MCLI_PRIVATE_API_KEY"), + BaseURL: getEnv("MONGODB_ATLAS_BASE_URL", "MCLI_OPS_MANAGER_URL"), + RealmBaseURL: getEnv("MONGODB_REALM_BASE_URL"), + AWSAssumeRoleARN: getEnv("ASSUME_ROLE_ARN", "TF_VAR_ASSUME_ROLE_ARN"), + 
AWSSecretName: getEnv("SECRET_NAME", "TF_VAR_SECRET_NAME"), + AWSRegion: getEnv("AWS_REGION", "TF_VAR_AWS_REGION"), + AWSAccessKeyID: getEnv("AWS_ACCESS_KEY_ID", "TF_VAR_AWS_ACCESS_KEY_ID"), + AWSSecretAccessKey: getEnv("AWS_SECRET_ACCESS_KEY", "TF_VAR_AWS_SECRET_ACCESS_KEY"), + AWSSessionToken: getEnv("AWS_SESSION_TOKEN", "TF_VAR_AWS_SESSION_TOKEN"), + AWSEndpoint: getEnv("STS_ENDPOINT", "TF_VAR_STS_ENDPOINT"), + } +} + +func (e *Vars) GetCredentials() *Credentials { + return &Credentials{ + AccessToken: e.AccessToken, + ClientID: e.ClientID, + ClientSecret: e.ClientSecret, + PublicKey: e.PublicKey, + PrivateKey: e.PrivateKey, + BaseURL: e.BaseURL, + RealmBaseURL: e.RealmBaseURL, + } +} + +// GetAWS returns variables in the format AWS expects, e.g. region in lowercase. +func (e *Vars) GetAWS() *AWSVars { + return &AWSVars{ + AssumeRoleARN: e.AWSAssumeRoleARN, + SecretName: e.AWSSecretName, + Region: conversion.MongoDBRegionToAWSRegion(e.AWSRegion), + AccessKeyID: e.AWSAccessKeyID, + SecretAccessKey: e.AWSSecretAccessKey, + SessionToken: e.AWSSessionToken, + Endpoint: e.AWSEndpoint, + } +} + +func getEnv(key ...string) string { + for _, k := range key { + if v := os.Getenv(k); v != "" { + return v + } + } + return "" +} + +func CoalesceAWSVars(awsVars ...*AWSVars) *AWSVars { + for _, awsVar := range awsVars { + if awsVar.IsPresent() { + return awsVar + } + } + return nil +} + +func CoalesceCredentials(credentials ...*Credentials) *Credentials { + for _, credential := range credentials { + if credential.IsPresent() { + return credential + } + } + return nil +} diff --git a/internal/config/credentials_test.go b/internal/config/credentials_test.go new file mode 100644 index 0000000000..86641189eb --- /dev/null +++ b/internal/config/credentials_test.go @@ -0,0 +1,504 @@ +package config_test + +import ( + "errors" + "testing" + + "github.com/mongodb/terraform-provider-mongodbatlas/internal/config" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestCredentials_AuthMethod(t *testing.T) { + testCases := map[string]struct { + credentials config.Credentials + want config.AuthMethod + }{ + "Empty credentials returns Unknown": { + credentials: config.Credentials{}, + want: config.Unknown, + }, + "Access token takes priority": { + credentials: config.Credentials{ + AccessToken: "token", + ClientID: "id", + ClientSecret: "secret", + PublicKey: "public", + PrivateKey: "private", + }, + want: config.AccessToken, + }, + "Service account when no access token": { + credentials: config.Credentials{ + ClientID: "id", + ClientSecret: "secret", + PublicKey: "public", + PrivateKey: "private", + }, + want: config.ServiceAccount, + }, + "Service account with only ClientID": { + credentials: config.Credentials{ + ClientID: "id", + }, + want: config.ServiceAccount, + }, + "Service account with only ClientSecret": { + credentials: config.Credentials{ + ClientSecret: "secret", + }, + want: config.ServiceAccount, + }, + "Digest when only digest credentials": { + credentials: config.Credentials{ + PublicKey: "public", + PrivateKey: "private", + }, + want: config.Digest, + }, + "Digest with only PublicKey": { + credentials: config.Credentials{ + PublicKey: "public", + }, + want: config.Digest, + }, + "Digest with only PrivateKey": { + credentials: config.Credentials{ + PrivateKey: "private", + }, + want: config.Digest, + }, + } + + for name, tc := range testCases { + t.Run(name, func(t *testing.T) { + got := tc.credentials.AuthMethod() + assert.Equal(t, tc.want, got) + }) + } +} 
+ +func TestCredentials_HasAccessToken(t *testing.T) { + testCases := map[string]struct { + credentials config.Credentials + want bool + }{ + "Empty credentials": { + credentials: config.Credentials{}, + want: false, + }, + "With access token": { + credentials: config.Credentials{ + AccessToken: "token", + }, + want: true, + }, + } + + for name, tc := range testCases { + t.Run(name, func(t *testing.T) { + got := tc.credentials.HasAccessToken() + assert.Equal(t, tc.want, got) + }) + } +} + +func TestCredentials_HasServiceAccount(t *testing.T) { + testCases := map[string]struct { + credentials config.Credentials + want bool + }{ + "Empty credentials": { + credentials: config.Credentials{}, + want: false, + }, + "With ClientID only": { + credentials: config.Credentials{ + ClientID: "id", + }, + want: true, + }, + "With ClientSecret only": { + credentials: config.Credentials{ + ClientSecret: "secret", + }, + want: true, + }, + "With both ClientID and ClientSecret": { + credentials: config.Credentials{ + ClientID: "id", + ClientSecret: "secret", + }, + want: true, + }, + } + + for name, tc := range testCases { + t.Run(name, func(t *testing.T) { + got := tc.credentials.HasServiceAccount() + assert.Equal(t, tc.want, got) + }) + } +} + +func TestCredentials_HasDigest(t *testing.T) { + testCases := map[string]struct { + credentials config.Credentials + want bool + }{ + "Empty credentials": { + credentials: config.Credentials{}, + want: false, + }, + "With PublicKey only": { + credentials: config.Credentials{ + PublicKey: "public", + }, + want: true, + }, + "With PrivateKey only": { + credentials: config.Credentials{ + PrivateKey: "private", + }, + want: true, + }, + "With both PublicKey and PrivateKey": { + credentials: config.Credentials{ + PublicKey: "public", + PrivateKey: "private", + }, + want: true, + }, + } + + for name, tc := range testCases { + t.Run(name, func(t *testing.T) { + got := tc.credentials.HasDigest() + assert.Equal(t, tc.want, got) + }) + } +} + +func TestCredentials_IsPresent(t *testing.T) { + testCases := map[string]struct { + credentials config.Credentials + want bool + }{ + "Empty credentials": { + credentials: config.Credentials{}, + want: false, + }, + "With access token": { + credentials: config.Credentials{ + AccessToken: "token", + }, + want: true, + }, + "With service account": { + credentials: config.Credentials{ + ClientID: "id", + }, + want: true, + }, + "With digest": { + credentials: config.Credentials{ + PublicKey: "public", + }, + want: true, + }, + } + + for name, tc := range testCases { + t.Run(name, func(t *testing.T) { + got := tc.credentials.IsPresent() + assert.Equal(t, tc.want, got) + }) + } +} + +func TestCredentials_Warnings(t *testing.T) { + testCases := map[string]struct { + credentials config.Credentials + want string + }{ + "No credentials": { + credentials: config.Credentials{}, + want: "No credentials set", + }, + "Only access token - no warning": { + credentials: config.Credentials{ + AccessToken: "token", + }, + want: "", + }, + "Only service account - no warning": { + credentials: config.Credentials{ + ClientID: "id", + }, + want: "", + }, + "Only digest - no warning": { + credentials: config.Credentials{ + PublicKey: "public", + }, + want: "", + }, + "Access token and service account": { + credentials: config.Credentials{ + AccessToken: "token", + ClientID: "id", + ClientSecret: "secret", + }, + want: "Access Token will be used although Service Account is also set", + }, + "Access token and digest": { + credentials: config.Credentials{ + 
AccessToken: "token", + PublicKey: "public", + PrivateKey: "private", + }, + want: "Access Token will be used although API Keys is also set", + }, + "Service account and digest": { + credentials: config.Credentials{ + ClientID: "id", + PublicKey: "public", + PrivateKey: "private", + }, + want: "Service Account will be used although API Keys is also set", + }, + "All three methods": { + credentials: config.Credentials{ + AccessToken: "token", + ClientID: "id", + ClientSecret: "secret", + PublicKey: "public", + PrivateKey: "private", + }, + want: "Access Token will be used although Service Account and API Keys are also set", + }, + } + + for name, tc := range testCases { + t.Run(name, func(t *testing.T) { + got := tc.credentials.Warnings() + assert.Equal(t, tc.want, got) + }) + } +} + +func TestGetCredentials(t *testing.T) { + mockGetAWSCredentials := func(awsVars *config.AWSVars) (*config.Credentials, error) { + if awsVars.AssumeRoleARN == "error" { + return nil, errors.New("AWS error") + } + return &config.Credentials{ + AccessToken: "aws-token", + }, nil + } + + testCases := map[string]struct { + providerVars *config.Vars + envVars *config.Vars + want *config.Credentials + wantErr bool + }{ + "AWS credentials take priority": { + providerVars: &config.Vars{ + AWSAssumeRoleARN: "arn", + PublicKey: "provider-public", + }, + envVars: &config.Vars{ + PublicKey: "env-public", + }, + want: &config.Credentials{ + AccessToken: "aws-token", + }, + wantErr: false, + }, + "AWS credentials error": { + providerVars: &config.Vars{ + AWSAssumeRoleARN: "error", + }, + envVars: &config.Vars{}, + want: nil, + wantErr: true, + }, + "Provider vars take priority over env vars": { + providerVars: &config.Vars{ + PublicKey: "provider-public", + }, + envVars: &config.Vars{ + PublicKey: "env-public", + }, + want: &config.Credentials{ + PublicKey: "provider-public", + }, + wantErr: false, + }, + "Env vars when no provider vars": { + providerVars: &config.Vars{}, + envVars: &config.Vars{ + PublicKey: "env-public", + }, + want: &config.Credentials{ + PublicKey: "env-public", + }, + wantErr: false, + }, + "Empty credentials when nothing provided": { + providerVars: &config.Vars{}, + envVars: &config.Vars{}, + want: &config.Credentials{}, + wantErr: false, + }, + } + + for name, tc := range testCases { + t.Run(name, func(t *testing.T) { + got, err := config.GetCredentials(tc.providerVars, tc.envVars, mockGetAWSCredentials) + if tc.wantErr { + assert.Error(t, err) + } else { + require.NoError(t, err) + assert.Equal(t, tc.want, got) + } + }) + } +} + +func TestAWSVars_IsPresent(t *testing.T) { + testCases := map[string]struct { + awsVars *config.AWSVars + want bool + }{ + "Empty AWS vars": { + awsVars: &config.AWSVars{}, + want: false, + }, + "With AssumeRoleARN": { + awsVars: &config.AWSVars{ + AssumeRoleARN: "arn", + }, + want: true, + }, + "With other fields but no AssumeRoleARN": { + awsVars: &config.AWSVars{ + SecretName: "secret", + Region: "us-east-1", + }, + want: false, + }, + } + + for name, tc := range testCases { + t.Run(name, func(t *testing.T) { + got := tc.awsVars.IsPresent() + assert.Equal(t, tc.want, got) + }) + } +} + +func TestNewEnvVars(t *testing.T) { + // Test the first env var for each attribute. 
+ t.Setenv("MONGODB_ATLAS_ACCESS_TOKEN", "env-token") + t.Setenv("MONGODB_ATLAS_CLIENT_ID", "env-client-id") + t.Setenv("MONGODB_ATLAS_CLIENT_SECRET", "env-client-secret") + t.Setenv("MONGODB_ATLAS_PUBLIC_API_KEY", "env-public") + t.Setenv("MONGODB_ATLAS_PRIVATE_API_KEY", "env-private") + t.Setenv("MONGODB_ATLAS_BASE_URL", "url1") + t.Setenv("MONGODB_REALM_BASE_URL", "url2") + t.Setenv("ASSUME_ROLE_ARN", "arn") + t.Setenv("SECRET_NAME", "env-secret") + t.Setenv("AWS_REGION", "us-west-2") + t.Setenv("AWS_ACCESS_KEY_ID", "env-access") + t.Setenv("AWS_SECRET_ACCESS_KEY", "env-secret-key") + t.Setenv("AWS_SESSION_TOKEN", "env-token") + t.Setenv("STS_ENDPOINT", "https://sts.amazonaws.com") + + vars := config.NewEnvVars() + assert.Equal(t, "env-token", vars.AccessToken) + assert.Equal(t, "env-client-id", vars.ClientID) + assert.Equal(t, "env-client-secret", vars.ClientSecret) + assert.Equal(t, "env-public", vars.PublicKey) + assert.Equal(t, "env-private", vars.PrivateKey) + assert.Equal(t, "url1", vars.BaseURL) + assert.Equal(t, "url2", vars.RealmBaseURL) + assert.Equal(t, "arn", vars.AWSAssumeRoleARN) + assert.Equal(t, "env-secret", vars.AWSSecretName) + assert.Equal(t, "us-west-2", vars.AWSRegion) + assert.Equal(t, "env-access", vars.AWSAccessKeyID) + assert.Equal(t, "env-secret-key", vars.AWSSecretAccessKey) + assert.Equal(t, "env-token", vars.AWSSessionToken) + assert.Equal(t, "https://sts.amazonaws.com", vars.AWSEndpoint) +} + +func TestCoalesceAWSVars(t *testing.T) { + awsVars1 := &config.AWSVars{AssumeRoleARN: "arn1"} + awsVars2 := &config.AWSVars{AssumeRoleARN: "arn2"} + awsVarsEmpty := &config.AWSVars{} + + testCases := map[string]struct { + want *config.AWSVars + awsVars []*config.AWSVars + }{ + "First present AWS vars": { + awsVars: []*config.AWSVars{awsVars1, awsVars2}, + want: awsVars1, + }, + "Skip empty, return first present": { + awsVars: []*config.AWSVars{awsVarsEmpty, awsVars2}, + want: awsVars2, + }, + "All empty returns nil": { + awsVars: []*config.AWSVars{awsVarsEmpty, awsVarsEmpty}, + want: nil, + }, + "No vars returns nil": { + awsVars: []*config.AWSVars{}, + want: nil, + }, + } + + for name, tc := range testCases { + t.Run(name, func(t *testing.T) { + got := config.CoalesceAWSVars(tc.awsVars...) + assert.Equal(t, tc.want, got) + }) + } +} + +func TestCoalesceCredentials(t *testing.T) { + creds1 := &config.Credentials{PublicKey: "key1"} + creds2 := &config.Credentials{PublicKey: "key2"} + credsEmpty := &config.Credentials{} + + testCases := map[string]struct { + want *config.Credentials + credentials []*config.Credentials + }{ + "First present credentials": { + credentials: []*config.Credentials{creds1, creds2}, + want: creds1, + }, + "Skip empty, return first present": { + credentials: []*config.Credentials{credsEmpty, creds2}, + want: creds2, + }, + "All empty returns nil": { + credentials: []*config.Credentials{credsEmpty, credsEmpty}, + want: nil, + }, + "No credentials returns nil": { + credentials: []*config.Credentials{}, + want: nil, + }, + } + + for name, tc := range testCases { + t.Run(name, func(t *testing.T) { + got := config.CoalesceCredentials(tc.credentials...) 
+ assert.Equal(t, tc.want, got) + }) + } +} diff --git a/internal/config/service_account.go b/internal/config/service_account.go index 6a214e9b17..84b41cc64e 100644 --- a/internal/config/service_account.go +++ b/internal/config/service_account.go @@ -24,20 +24,20 @@ var saInfo = struct { mu sync.Mutex }{} -func getTokenSource(c *Config, tokenRenewalBase http.RoundTripper) (auth.TokenSource, error) { +func getTokenSource(clientID, clientSecret, baseURL string, tokenRenewalBase http.RoundTripper) (auth.TokenSource, error) { saInfo.mu.Lock() defer saInfo.mu.Unlock() + baseURL = strings.TrimRight(baseURL, "/") if saInfo.tokenSource != nil { // Token source in cache. - if saInfo.clientID != c.ClientID || saInfo.clientSecret != c.ClientSecret || saInfo.baseURL != c.BaseURL { + if saInfo.clientID != clientID || saInfo.clientSecret != clientSecret || saInfo.baseURL != baseURL { return nil, fmt.Errorf("service account credentials changed") } return saInfo.tokenSource, nil } - conf := clientcredentials.NewConfig(c.ClientID, c.ClientSecret) - if c.BaseURL != "" { - baseURL := strings.TrimRight(c.BaseURL, "/") + conf := clientcredentials.NewConfig(clientID, clientSecret) + if baseURL != "" { conf.TokenURL = baseURL + clientcredentials.TokenAPIPath conf.RevokeURL = baseURL + clientcredentials.RevokeAPIPath } @@ -47,9 +47,9 @@ func getTokenSource(c *Config, tokenRenewalBase http.RoundTripper) (auth.TokenSo if _, err := tokenSource.Token(); err != nil { // Retrieve token to fail-fast if credentials are invalid. return nil, err } - saInfo.clientID = c.ClientID - saInfo.clientSecret = c.ClientSecret - saInfo.baseURL = c.BaseURL + saInfo.clientID = clientID + saInfo.clientSecret = clientSecret + saInfo.baseURL = baseURL saInfo.tokenSource = tokenSource return saInfo.tokenSource, nil } diff --git a/internal/config/transport_test.go b/internal/config/transport_test.go index b718ac1b72..2c54b5d77f 100644 --- a/internal/config/transport_test.go +++ b/internal/config/transport_test.go @@ -159,17 +159,17 @@ func TestAccNetworkLogging(t *testing.T) { var logOutput bytes.Buffer log.SetOutput(&logOutput) defer log.SetOutput(os.Stderr) - cfg := &config.Config{ + c := &config.Credentials{ PublicKey: os.Getenv("MONGODB_ATLAS_PUBLIC_KEY"), PrivateKey: os.Getenv("MONGODB_ATLAS_PRIVATE_KEY"), ClientID: os.Getenv("MONGODB_ATLAS_CLIENT_ID"), ClientSecret: os.Getenv("MONGODB_ATLAS_CLIENT_SECRET"), BaseURL: os.Getenv("MONGODB_ATLAS_BASE_URL"), } - client, err := cfg.NewClient(t.Context()) + client, err := config.NewClient(c, "") require.NoError(t, err) - // Make a simple API call that should trigger our enhanced logging + // Make a simple API call that should trigger our enhanced logging. 
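
For orientation, the precedence these table-driven tests assert (AWS Secrets Manager when an assume-role ARN is set, then provider attributes, then environment variables) can be pictured with a short dependency-free sketch. The types and helpers below are simplified stand-ins chosen for illustration, not the actual internal/config implementation.

```go
package main

import (
	"errors"
	"fmt"
)

// Simplified stand-ins for config.Vars and config.Credentials.
type vars struct {
	PublicKey        string
	AWSAssumeRoleARN string
}

type credentials struct{ PublicKey, AccessToken string }

func (c *credentials) isPresent() bool { return *c != (credentials{}) }

// getCredentials resolves credentials in the order the tests assert:
// AWS Secrets Manager wins when an assume-role ARN is present, otherwise the
// first non-empty source between provider attributes and environment variables.
func getCredentials(providerVars, envVars *vars, fromAWS func(arn string) (*credentials, error)) (*credentials, error) {
	if arn := firstNonEmpty(providerVars.AWSAssumeRoleARN, envVars.AWSAssumeRoleARN); arn != "" {
		return fromAWS(arn)
	}
	for _, c := range []*credentials{
		{PublicKey: providerVars.PublicKey},
		{PublicKey: envVars.PublicKey},
	} {
		if c.isPresent() {
			return c, nil
		}
	}
	return &credentials{}, nil // empty credentials when nothing is provided
}

func firstNonEmpty(values ...string) string {
	for _, v := range values {
		if v != "" {
			return v
		}
	}
	return ""
}

func main() {
	fromAWS := func(arn string) (*credentials, error) {
		if arn == "error" {
			return nil, errors.New("AWS error")
		}
		return &credentials{AccessToken: "aws-token"}, nil
	}
	got, err := getCredentials(
		&vars{AWSAssumeRoleARN: "arn", PublicKey: "provider-public"},
		&vars{PublicKey: "env-public"},
		fromAWS,
	)
	fmt.Println(got, err) // prints: &{ aws-token} <nil> (AWS credentials take priority)
}
```
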
_, _, err = client.AtlasV2.OrganizationsApi.ListOrgs(t.Context()).Execute() require.NoError(t, err) logStr := logOutput.String() diff --git a/internal/provider/aws_credentials.go b/internal/provider/aws_credentials.go index 335589d681..34ceaf93fb 100644 --- a/internal/provider/aws_credentials.go +++ b/internal/provider/aws_credentials.go @@ -25,63 +25,34 @@ const ( minSegmentsForSTSRegionalHost = 4 ) -func configureCredentialsSTS(cfg *config.Config, secret, region, awsAccessKeyID, awsSecretAccessKey, awsSessionToken, endpoint string) (config.Config, error) { +func getAWSCredentials(c *config.AWSVars) (*config.Credentials, error) { defaultResolver := endpoints.DefaultResolver() stsCustResolverFn := func(service, _ string, optFns ...func(*endpoints.Options)) (endpoints.ResolvedEndpoint, error) { if service == sts.EndpointsID { - resolved, err := ResolveSTSEndpoint(endpoint, region) + resolved, err := ResolveSTSEndpoint(c.Endpoint, c.Region) if err != nil { return endpoints.ResolvedEndpoint{}, err } return resolved, nil } - return defaultResolver.EndpointFor(service, region, optFns...) + return defaultResolver.EndpointFor(service, c.Region, optFns...) } - sess := session.Must(session.NewSession(&aws.Config{ - Region: aws.String(region), - Credentials: credentials.NewStaticCredentials(awsAccessKeyID, awsSecretAccessKey, awsSessionToken), + Region: aws.String(c.Region), + Credentials: credentials.NewStaticCredentials(c.AccessKeyID, c.SecretAccessKey, c.SessionToken), EndpointResolver: endpoints.ResolverFunc(stsCustResolverFn), })) - - creds := stscreds.NewCredentials(sess, cfg.AssumeRoleARN) - - _, err := sess.Config.Credentials.Get() + creds := stscreds.NewCredentials(sess, c.AssumeRoleARN) + secretString, err := secretsManagerGetSecretValue(sess, &aws.Config{Credentials: creds, Region: aws.String(c.Region)}, c.SecretName) if err != nil { - log.Printf("Session get credentials error: %s", err) - return *cfg, err + return nil, err } - _, err = creds.Get() + var secret config.Credentials + err = json.Unmarshal([]byte(secretString), &secret) if err != nil { - log.Printf("STS get credentials error: %s", err) - return *cfg, err + return nil, err } - secretString, err := secretsManagerGetSecretValue(sess, &aws.Config{Credentials: creds, Region: aws.String(region)}, secret) - if err != nil { - log.Printf("Get Secrets error: %s", err) - return *cfg, err - } - - var secretData SecretData - err = json.Unmarshal([]byte(secretString), &secretData) - if err != nil { - return *cfg, err - } - - switch config.ResolveAuthMethod(&secretData) { - case config.AccessToken: - cfg.AccessToken = secretData.AccessToken - case config.Digest: - cfg.PublicKey = secretData.PublicKey - cfg.PrivateKey = secretData.PrivateKey - case config.ServiceAccount: - cfg.ClientID = secretData.ClientID - cfg.ClientSecret = secretData.ClientSecret - case config.Unknown: - return *cfg, fmt.Errorf("secret missing value for supported credentials: PrivateKey/PublicKey, ClientID/ClientSecret or AccessToken") - } - - return *cfg, nil + return &secret, nil } func DeriveSTSRegionFromEndpoint(ep string) string { diff --git a/internal/provider/provider.go b/internal/provider/provider.go index 54da063b2b..3801398d08 100644 --- a/internal/provider/provider.go +++ b/internal/provider/provider.go @@ -3,12 +3,10 @@ package provider import ( "context" "log" - "os" + "slices" "github.com/hashicorp/terraform-plugin-framework-validators/listvalidator" - "github.com/hashicorp/terraform-plugin-framework/attr" 
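
getAWSCredentials above decodes the Secrets Manager payload straight into config.Credentials. Assuming its fields keep JSON tags equivalent to the SecretData struct removed elsewhere in this patch (public_key, private_key, client_id, client_secret, access_token), which this diff does not confirm, a secret holding Service Account credentials would unmarshal roughly like this:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Hypothetical mirror of the JSON shape expected in the AWS secret; the tag
// names are taken from the removed SecretData struct and assumed unchanged.
type credentials struct {
	PublicKey    string `json:"public_key"`
	PrivateKey   string `json:"private_key"`
	ClientID     string `json:"client_id"`
	ClientSecret string `json:"client_secret"`
	AccessToken  string `json:"access_token"`
}

func main() {
	// Placeholder secret value; only the Service Account keys are set here.
	secretString := `{"client_id": "example-client-id", "client_secret": "example-client-secret"}`
	var c credentials
	if err := json.Unmarshal([]byte(secretString), &c); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", c) // only the Service Account fields are populated
}
```
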
"github.com/hashicorp/terraform-plugin-framework/datasource" - "github.com/hashicorp/terraform-plugin-framework/diag" "github.com/hashicorp/terraform-plugin-framework/provider" "github.com/hashicorp/terraform-plugin-framework/provider/metaschema" "github.com/hashicorp/terraform-plugin-framework/provider/schema" @@ -20,7 +18,6 @@ import ( "github.com/hashicorp/terraform-plugin-mux/tf5to6server" "github.com/hashicorp/terraform-plugin-mux/tf6muxserver" - "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" "github.com/mongodb/terraform-provider-mongodbatlas/internal/config" "github.com/mongodb/terraform-provider-mongodbatlas/internal/service/advancedcluster" "github.com/mongodb/terraform-provider-mongodbatlas/internal/service/alertconfiguration" @@ -53,8 +50,7 @@ import ( ) const ( - MongodbGovCloudURL = "https://cloud.mongodbgov.com" - MongodbGovCloudQAURL = "https://cloud-qa.mongodbgov.com" + govURL = "https://cloud.mongodbgov.com" MongodbGovCloudDevURL = "https://cloud-dev.mongodbgov.com" ProviderConfigError = "error in configuring the provider." MissingAuthAttrError = "either AWS Secrets Manager, Service Accounts or Atlas Programmatic API Keys attributes must be set" @@ -66,35 +62,38 @@ const ( ProviderMetaModuleVersionDesc = "The version of the module using the provider" ) +var ( + govAdditionalURLs = []string{ + "https://cloud-dev.mongodbgov.com", + "https://cloud-qa.mongodbgov.com", + } +) + type MongodbtlasProvider struct { } -type tfMongodbAtlasProviderModel struct { - AssumeRole types.List `tfsdk:"assume_role"` - Region types.String `tfsdk:"region"` - PrivateKey types.String `tfsdk:"private_key"` - BaseURL types.String `tfsdk:"base_url"` - RealmBaseURL types.String `tfsdk:"realm_base_url"` - SecretName types.String `tfsdk:"secret_name"` - PublicKey types.String `tfsdk:"public_key"` - StsEndpoint types.String `tfsdk:"sts_endpoint"` - AwsAccessKeyID types.String `tfsdk:"aws_access_key_id"` - AwsSecretAccessKeyID types.String `tfsdk:"aws_secret_access_key"` - AwsSessionToken types.String `tfsdk:"aws_session_token"` - ClientID types.String `tfsdk:"client_id"` - ClientSecret types.String `tfsdk:"client_secret"` - AccessToken types.String `tfsdk:"access_token"` - IsMongodbGovCloud types.Bool `tfsdk:"is_mongodbgov_cloud"` +type tfModel struct { + Region types.String `tfsdk:"region"` + PrivateKey types.String `tfsdk:"private_key"` + BaseURL types.String `tfsdk:"base_url"` + RealmBaseURL types.String `tfsdk:"realm_base_url"` + SecretName types.String `tfsdk:"secret_name"` + PublicKey types.String `tfsdk:"public_key"` + StsEndpoint types.String `tfsdk:"sts_endpoint"` + AwsAccessKeyID types.String `tfsdk:"aws_access_key_id"` + AwsSecretAccessKeyID types.String `tfsdk:"aws_secret_access_key"` + AwsSessionToken types.String `tfsdk:"aws_session_token"` + ClientID types.String `tfsdk:"client_id"` + ClientSecret types.String `tfsdk:"client_secret"` + AccessToken types.String `tfsdk:"access_token"` + AssumeRole []tfAssumeRoleModel `tfsdk:"assume_role"` + IsMongodbGovCloud types.Bool `tfsdk:"is_mongodbgov_cloud"` } type tfAssumeRoleModel struct { RoleARN types.String `tfsdk:"role_arn"` } -var AssumeRoleType = types.ObjectType{AttrTypes: map[string]attr.Type{ - "role_arn": types.StringType, -}} - func (p *MongodbtlasProvider) Metadata(ctx context.Context, req provider.MetadataRequest, resp *provider.MetadataResponse) { resp.TypeName = "mongodbatlas" resp.Version = version.ProviderVersion @@ -200,194 +199,57 @@ var fwAssumeRoleSchema = schema.ListNestedBlock{ } func (p 
*MongodbtlasProvider) Configure(ctx context.Context, req provider.ConfigureRequest, resp *provider.ConfigureResponse) { - var data tfMongodbAtlasProviderModel - - resp.Diagnostics.Append(req.Config.Get(ctx, &data)...) + providerVars := getProviderVars(ctx, req, resp) if resp.Diagnostics.HasError() { return } - - data = setDefaultValuesWithValidations(ctx, &data, resp) - if resp.Diagnostics.HasError() { + c, err := config.GetCredentials(providerVars, config.NewEnvVars(), getAWSCredentials) + if err != nil { + resp.Diagnostics.AddError("Error getting credentials for provider", err.Error()) return } - - cfg := config.Config{ - PublicKey: data.PublicKey.ValueString(), - PrivateKey: data.PrivateKey.ValueString(), - BaseURL: data.BaseURL.ValueString(), - RealmBaseURL: data.RealmBaseURL.ValueString(), - TerraformVersion: req.TerraformVersion, - ClientID: data.ClientID.ValueString(), - ClientSecret: data.ClientSecret.ValueString(), - AccessToken: data.AccessToken.ValueString(), - } - - var assumeRoles []tfAssumeRoleModel - data.AssumeRole.ElementsAs(ctx, &assumeRoles, true) - awsRoleDefined := len(assumeRoles) > 0 - if awsRoleDefined { - cfg.AssumeRoleARN = assumeRoles[0].RoleARN.ValueString() - secret := data.SecretName.ValueString() - region := conversion.MongoDBRegionToAWSRegion(data.Region.ValueString()) - awsAccessKeyID := data.AwsAccessKeyID.ValueString() - awsSecretAccessKey := data.AwsSecretAccessKeyID.ValueString() - awsSessionToken := data.AwsSessionToken.ValueString() - endpoint := data.StsEndpoint.ValueString() - var err error - cfg, err = configureCredentialsSTS(&cfg, secret, region, awsAccessKeyID, awsSecretAccessKey, awsSessionToken, endpoint) - if err != nil { - resp.Diagnostics.AddError("failed to configure credentials STS", err.Error()) - return - } + if c.Warnings() != "" { + resp.Diagnostics.AddWarning("Warning getting credentials for provider", c.Warnings()) } - - client, err := cfg.NewClient(ctx) - + client, err := config.NewClient(c, req.TerraformVersion) if err != nil { - resp.Diagnostics.AddError( - "failed to initialize a new client", - err.Error(), - ) + resp.Diagnostics.AddError("Error initializing provider", err.Error()) return } - resp.DataSourceData = client resp.ResourceData = client } -func setDefaultValuesWithValidations(ctx context.Context, data *tfMongodbAtlasProviderModel, resp *provider.ConfigureResponse) tfMongodbAtlasProviderModel { - if mongodbgovCloud := data.IsMongodbGovCloud.ValueBool(); mongodbgovCloud { - if !isGovBaseURLConfiguredForProvider(data) { - data.BaseURL = types.StringValue(MongodbGovCloudURL) - } - } - if data.BaseURL.ValueString() == "" { - data.BaseURL = types.StringValue(MultiEnvDefaultFunc([]string{ - "MONGODB_ATLAS_BASE_URL", - "MCLI_OPS_MANAGER_URL", - }, "").(string)) - } - - awsRoleDefined := false - if len(data.AssumeRole.Elements()) == 0 { - assumeRoleArn := MultiEnvDefaultFunc([]string{ - "ASSUME_ROLE_ARN", - "TF_VAR_ASSUME_ROLE_ARN", - }, "").(string) - if assumeRoleArn != "" { - awsRoleDefined = true - var diags diag.Diagnostics - data.AssumeRole, diags = types.ListValueFrom(ctx, AssumeRoleType, []tfAssumeRoleModel{ - { - RoleARN: types.StringValue(assumeRoleArn), - }, - }) - if diags.HasError() { - resp.Diagnostics.Append(diags...) 
- } - } - } else { - awsRoleDefined = true - } - - if data.PublicKey.ValueString() == "" { - data.PublicKey = types.StringValue(MultiEnvDefaultFunc([]string{ - "MONGODB_ATLAS_PUBLIC_API_KEY", - "MONGODB_ATLAS_PUBLIC_KEY", - "MCLI_PUBLIC_API_KEY", - }, "").(string)) - } - - if data.PrivateKey.ValueString() == "" { - data.PrivateKey = types.StringValue(MultiEnvDefaultFunc([]string{ - "MONGODB_ATLAS_PRIVATE_API_KEY", - "MONGODB_ATLAS_PRIVATE_KEY", - "MCLI_PRIVATE_API_KEY", - }, "").(string)) - } - - if data.RealmBaseURL.ValueString() == "" { - data.RealmBaseURL = types.StringValue(MultiEnvDefaultFunc([]string{ - "MONGODB_REALM_BASE_URL", - }, "").(string)) - } - - if data.Region.ValueString() == "" { - data.Region = types.StringValue(MultiEnvDefaultFunc([]string{ - "AWS_REGION", - "TF_VAR_AWS_REGION", - }, "").(string)) - } - - if data.StsEndpoint.ValueString() == "" { - data.StsEndpoint = types.StringValue(MultiEnvDefaultFunc([]string{ - "STS_ENDPOINT", - "TF_VAR_STS_ENDPOINT", - }, "").(string)) - } - - if data.AwsAccessKeyID.ValueString() == "" { - data.AwsAccessKeyID = types.StringValue(MultiEnvDefaultFunc([]string{ - "AWS_ACCESS_KEY_ID", - "TF_VAR_AWS_ACCESS_KEY_ID", - }, "").(string)) - } - - if data.AwsSecretAccessKeyID.ValueString() == "" { - data.AwsSecretAccessKeyID = types.StringValue(MultiEnvDefaultFunc([]string{ - "AWS_SECRET_ACCESS_KEY", - "TF_VAR_AWS_SECRET_ACCESS_KEY", - }, "").(string)) - } - - if data.AwsSessionToken.ValueString() == "" { - data.AwsSessionToken = types.StringValue(MultiEnvDefaultFunc([]string{ - "AWS_SESSION_TOKEN", - "TF_VAR_AWS_SESSION_TOKEN", - }, "").(string)) - } - - if data.SecretName.ValueString() == "" { - data.SecretName = types.StringValue(MultiEnvDefaultFunc([]string{ - "SECRET_NAME", - "TF_VAR_SECRET_NAME", - }, "").(string)) - } - - if data.ClientID.ValueString() == "" { - data.ClientID = types.StringValue(MultiEnvDefaultFunc([]string{ - "MONGODB_ATLAS_CLIENT_ID", - "TF_VAR_CLIENT_ID", - }, "").(string)) +func getProviderVars(ctx context.Context, req provider.ConfigureRequest, resp *provider.ConfigureResponse) *config.Vars { + var data tfModel + resp.Diagnostics.Append(req.Config.Get(ctx, &data)...) 
+ if resp.Diagnostics.HasError() { + return nil } - - if data.ClientSecret.ValueString() == "" { - data.ClientSecret = types.StringValue(MultiEnvDefaultFunc([]string{ - "MONGODB_ATLAS_CLIENT_SECRET", - "TF_VAR_CLIENT_SECRET", - }, "").(string)) + assumeRoleARN := "" + if len(data.AssumeRole) > 0 { + assumeRoleARN = data.AssumeRole[0].RoleARN.ValueString() } - - if data.AccessToken.ValueString() == "" { - data.AccessToken = types.StringValue(MultiEnvDefaultFunc([]string{ - "MONGODB_ATLAS_OAUTH_TOKEN", - "TF_VAR_OAUTH_TOKEN", - }, "").(string)) + baseURL := data.BaseURL.ValueString() + if data.IsMongodbGovCloud.ValueBool() && !slices.Contains(govAdditionalURLs, baseURL) { + baseURL = govURL } - - // Check if any valid authentication method is provided - if !config.HasValidAuthCredentials(&config.Config{ - PublicKey: data.PublicKey.ValueString(), - PrivateKey: data.PrivateKey.ValueString(), - ClientID: data.ClientID.ValueString(), - ClientSecret: data.ClientSecret.ValueString(), - AccessToken: data.AccessToken.ValueString(), - }) && !awsRoleDefined { - resp.Diagnostics.AddError(ProviderConfigError, MissingAuthAttrError) + return &config.Vars{ + AccessToken: data.AccessToken.ValueString(), + ClientID: data.ClientID.ValueString(), + ClientSecret: data.ClientSecret.ValueString(), + PublicKey: data.PublicKey.ValueString(), + PrivateKey: data.PrivateKey.ValueString(), + BaseURL: baseURL, + RealmBaseURL: data.RealmBaseURL.ValueString(), + AWSAssumeRoleARN: assumeRoleARN, + AWSSecretName: data.SecretName.ValueString(), + AWSRegion: data.Region.ValueString(), + AWSAccessKeyID: data.AwsAccessKeyID.ValueString(), + AWSSecretAccessKey: data.AwsSecretAccessKeyID.ValueString(), + AWSSessionToken: data.AwsSessionToken.ValueString(), + AWSEndpoint: data.StsEndpoint.ValueString(), } - - return *data } func (p *MongodbtlasProvider) DataSources(context.Context) []func() datasource.DataSource { @@ -486,26 +348,3 @@ func MuxProviderFactory() func() tfprotov6.ProviderServer { } return muxServer.ProviderServer } - -func MultiEnvDefaultFunc(ks []string, def any) any { - for _, k := range ks { - if v := os.Getenv(k); v != "" { - return v - } - } - return def -} - -func isGovBaseURLConfigured(baseURL string) bool { - if baseURL == "" { - baseURL = MultiEnvDefaultFunc([]string{ - "MONGODB_ATLAS_BASE_URL", - "MCLI_OPS_MANAGER_URL", - }, "").(string) - } - return baseURL == MongodbGovCloudDevURL || baseURL == MongodbGovCloudQAURL -} - -func isGovBaseURLConfiguredForProvider(data *tfMongodbAtlasProviderModel) bool { - return isGovBaseURLConfigured(data.BaseURL.ValueString()) -} diff --git a/internal/provider/provider_sdk2.go b/internal/provider/provider_sdk2.go index 283b2c447d..0259167d13 100644 --- a/internal/provider/provider_sdk2.go +++ b/internal/provider/provider_sdk2.go @@ -2,11 +2,12 @@ package provider import ( "context" + "fmt" + "slices" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" "github.com/mongodb/terraform-provider-mongodbatlas/internal/config" "github.com/mongodb/terraform-provider-mongodbatlas/internal/service/accesslistapikey" "github.com/mongodb/terraform-provider-mongodbatlas/internal/service/apikey" @@ -52,21 +53,6 @@ import ( "github.com/mongodb/terraform-provider-mongodbatlas/internal/service/x509authenticationdatabaseuser" ) -type SecretData struct { - PublicKey string `json:"public_key"` - PrivateKey string `json:"private_key"` - ClientID string 
`json:"client_id"` - ClientSecret string `json:"client_secret"` - AccessToken string `json:"access_token"` -} - -// CredentialProvider implementation for SecretData -func (s *SecretData) GetPublicKey() string { return s.PublicKey } -func (s *SecretData) GetPrivateKey() string { return s.PrivateKey } -func (s *SecretData) GetClientID() string { return s.ClientID } -func (s *SecretData) GetClientSecret() string { return s.ClientSecret } -func (s *SecretData) GetAccessToken() string { return s.AccessToken } - // NewSdkV2Provider returns the provider to be use by the code. func NewSdkV2Provider() *schema.Provider { provider := &schema.Provider{ @@ -169,6 +155,23 @@ func NewSdkV2Provider() *schema.Provider { return provider } +func assumeRoleSchema() *schema.Schema { + return &schema.Schema{ + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "role_arn": { + Type: schema.TypeString, + Optional: true, + Description: "Amazon Resource Name (ARN) of an IAM Role to assume prior to making API calls.", + }, + }, + }, + } +} + func getDataSourcesMap() map[string]*schema.Resource { dataSourcesMap := map[string]*schema.Resource{ "mongodbatlas_custom_db_role": customdbrole.DataSource(), @@ -293,218 +296,47 @@ func getResourcesMap() map[string]*schema.Resource { func providerConfigure(provider *schema.Provider) func(ctx context.Context, d *schema.ResourceData) (any, diag.Diagnostics) { return func(ctx context.Context, d *schema.ResourceData) (any, diag.Diagnostics) { - diagnostics := setDefaultsAndValidations(d) - if diagnostics.HasError() { - return nil, diagnostics - } - - cfg := config.Config{ - PublicKey: d.Get("public_key").(string), - PrivateKey: d.Get("private_key").(string), - BaseURL: d.Get("base_url").(string), - RealmBaseURL: d.Get("realm_base_url").(string), - TerraformVersion: provider.TerraformVersion, - ClientID: d.Get("client_id").(string), - ClientSecret: d.Get("client_secret").(string), - AccessToken: d.Get("access_token").(string), - } - - assumeRoleValue, ok := d.GetOk("assume_role") - awsRoleDefined := ok && len(assumeRoleValue.([]any)) > 0 && assumeRoleValue.([]any)[0] != nil - if awsRoleDefined { - cfg.AssumeRoleARN = getAssumeRoleARN(assumeRoleValue.([]any)[0].(map[string]any)) - secret := d.Get("secret_name").(string) - region := conversion.MongoDBRegionToAWSRegion(d.Get("region").(string)) - awsAccessKeyID := d.Get("aws_access_key_id").(string) - awsSecretAccessKey := d.Get("aws_secret_access_key").(string) - awsSessionToken := d.Get("aws_session_token").(string) - endpoint := d.Get("sts_endpoint").(string) - var err error - cfg, err = configureCredentialsSTS(&cfg, secret, region, awsAccessKeyID, awsSecretAccessKey, awsSessionToken, endpoint) - if err != nil { - return nil, append(diagnostics, diag.FromErr(err)...) - } + var diags diag.Diagnostics + providerVars := getSDKv2ProviderVars(d) + c, err := config.GetCredentials(providerVars, config.NewEnvVars(), getAWSCredentials) + if err != nil { + return nil, append(diags, diag.FromErr(fmt.Errorf("error getting credentials for provider: %w", err))...) } - - client, err := cfg.NewClient(ctx) + // Don't log possible warnings as they will be logged by the TPF provider. + client, err := config.NewClient(c, provider.TerraformVersion) if err != nil { - return nil, append(diagnostics, diag.FromErr(err)...) + return nil, append(diags, diag.FromErr(fmt.Errorf("error initializing provider: %w", err))...) 
} - return client, diagnostics + return client, nil } } -func setDefaultsAndValidations(d *schema.ResourceData) diag.Diagnostics { - diagnostics := []diag.Diagnostic{} - - mongodbgovCloud := conversion.Pointer(d.Get("is_mongodbgov_cloud").(bool)) - if *mongodbgovCloud { - if !isGovBaseURLConfiguredForSDK2Provider(d) { - if err := d.Set("base_url", MongodbGovCloudURL); err != nil { - return append(diagnostics, diag.FromErr(err)...) - } - } - } - - if err := setValueFromConfigOrEnv(d, "base_url", []string{ - "MONGODB_ATLAS_BASE_URL", - "MCLI_OPS_MANAGER_URL", - }); err != nil { - return append(diagnostics, diag.FromErr(err)...) - } - - awsRoleDefined := false +func getSDKv2ProviderVars(d *schema.ResourceData) *config.Vars { + assumeRoleARN := "" assumeRoles := d.Get("assume_role").([]any) - if len(assumeRoles) == 0 { - roleArn := MultiEnvDefaultFunc([]string{ - "ASSUME_ROLE_ARN", - "TF_VAR_ASSUME_ROLE_ARN", - }, "").(string) - if roleArn != "" { - awsRoleDefined = true - if err := d.Set("assume_role", []map[string]any{{"role_arn": roleArn}}); err != nil { - return append(diagnostics, diag.FromErr(err)...) - } + if len(assumeRoles) > 0 { + if assumeRole, ok := assumeRoles[0].(map[string]any); ok { + assumeRoleARN = assumeRole["role_arn"].(string) } - } else { - awsRoleDefined = true } - - if err := setValueFromConfigOrEnv(d, "public_key", []string{ - "MONGODB_ATLAS_PUBLIC_API_KEY", - "MONGODB_ATLAS_PUBLIC_KEY", - "MCLI_PUBLIC_API_KEY", - }); err != nil { - return append(diagnostics, diag.FromErr(err)...) - } - - if err := setValueFromConfigOrEnv(d, "private_key", []string{ - "MONGODB_ATLAS_PRIVATE_API_KEY", - "MONGODB_ATLAS_PRIVATE_KEY", - "MCLI_PRIVATE_API_KEY", - }); err != nil { - return append(diagnostics, diag.FromErr(err)...) - } - - if err := setValueFromConfigOrEnv(d, "realm_base_url", []string{ - "MONGODB_REALM_BASE_URL", - }); err != nil { - return append(diagnostics, diag.FromErr(err)...) + baseURL := d.Get("base_url").(string) + if d.Get("is_mongodbgov_cloud").(bool) && !slices.Contains(govAdditionalURLs, baseURL) { + baseURL = govURL } - - if err := setValueFromConfigOrEnv(d, "region", []string{ - "AWS_REGION", - "TF_VAR_AWS_REGION", - }); err != nil { - return append(diagnostics, diag.FromErr(err)...) - } - - if err := setValueFromConfigOrEnv(d, "sts_endpoint", []string{ - "STS_ENDPOINT", - "TF_VAR_STS_ENDPOINT", - }); err != nil { - return append(diagnostics, diag.FromErr(err)...) - } - - if err := setValueFromConfigOrEnv(d, "aws_access_key_id", []string{ - "AWS_ACCESS_KEY_ID", - "TF_VAR_AWS_ACCESS_KEY_ID", - }); err != nil { - return append(diagnostics, diag.FromErr(err)...) + return &config.Vars{ + AccessToken: d.Get("access_token").(string), + ClientID: d.Get("client_id").(string), + ClientSecret: d.Get("client_secret").(string), + PublicKey: d.Get("public_key").(string), + PrivateKey: d.Get("private_key").(string), + BaseURL: baseURL, + RealmBaseURL: d.Get("realm_base_url").(string), + AWSAssumeRoleARN: assumeRoleARN, + AWSSecretName: d.Get("secret_name").(string), + AWSRegion: d.Get("region").(string), + AWSAccessKeyID: d.Get("aws_access_key_id").(string), + AWSSecretAccessKey: d.Get("aws_secret_access_key").(string), + AWSSessionToken: d.Get("aws_session_token").(string), + AWSEndpoint: d.Get("sts_endpoint").(string), } - - if err := setValueFromConfigOrEnv(d, "aws_secret_access_key", []string{ - "AWS_SECRET_ACCESS_KEY", - "TF_VAR_AWS_SECRET_ACCESS_KEY", - }); err != nil { - return append(diagnostics, diag.FromErr(err)...) 
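
One note on getSDKv2ProviderVars above: the removed code guarded explicitly against a nil first element of assume_role, and the comma-ok type assertion keeps that protection. A contrived but runnable illustration (both assertions below use the comma-ok form):

```go
package main

import "fmt"

func main() {
	// assume_role is a TypeList in the SDK v2 schema; an empty or degenerate
	// block can surface as a nil element in the []any value.
	assumeRoles := []any{nil}

	// The comma-ok assertion returns ok=false for a nil element, so role_arn
	// safely stays empty instead of panicking.
	arn := ""
	if len(assumeRoles) > 0 {
		if assumeRole, ok := assumeRoles[0].(map[string]any); ok {
			arn, _ = assumeRole["role_arn"].(string)
		}
	}
	fmt.Printf("role_arn=%q\n", arn) // prints: role_arn=""
}
```
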
- } - - if err := setValueFromConfigOrEnv(d, "secret_name", []string{ - "SECRET_NAME", - "TF_VAR_SECRET_NAME", - }); err != nil { - return append(diagnostics, diag.FromErr(err)...) - } - - if err := setValueFromConfigOrEnv(d, "aws_session_token", []string{ - "AWS_SESSION_TOKEN", - "TF_VAR_AWS_SESSION_TOKEN", - }); err != nil { - return append(diagnostics, diag.FromErr(err)...) - } - - if err := setValueFromConfigOrEnv(d, "client_id", []string{ - "MONGODB_ATLAS_CLIENT_ID", - "TF_VAR_CLIENT_ID", - }); err != nil { - return append(diagnostics, diag.FromErr(err)...) - } - - if err := setValueFromConfigOrEnv(d, "client_secret", []string{ - "MONGODB_ATLAS_CLIENT_SECRET", - "TF_VAR_CLIENT_SECRET", - }); err != nil { - return append(diagnostics, diag.FromErr(err)...) - } - - if err := setValueFromConfigOrEnv(d, "access_token", []string{ - "MONGODB_ATLAS_OAUTH_TOKEN", - "TF_VAR_OAUTH_TOKEN", - }); err != nil { - return append(diagnostics, diag.FromErr(err)...) - } - - // Check if any valid authentication method is provided - if !config.HasValidAuthCredentials(&config.Config{ - PublicKey: d.Get("public_key").(string), - PrivateKey: d.Get("private_key").(string), - ClientID: d.Get("client_id").(string), - ClientSecret: d.Get("client_secret").(string), - AccessToken: d.Get("access_token").(string), - }) && !awsRoleDefined { - diagnostics = append(diagnostics, diag.Diagnostic{Severity: diag.Error, Summary: MissingAuthAttrError}) - } - - return diagnostics -} - -func setValueFromConfigOrEnv(d *schema.ResourceData, attrName string, envVars []string) error { - var val = d.Get(attrName).(string) - if val == "" { - val = MultiEnvDefaultFunc(envVars, "").(string) - } - return d.Set(attrName, val) -} - -// assumeRoleSchema From aws provider.go -func assumeRoleSchema() *schema.Schema { - return &schema.Schema{ - Type: schema.TypeList, - Optional: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "role_arn": { - Type: schema.TypeString, - Optional: true, - Description: "Amazon Resource Name (ARN) of an IAM Role to assume prior to making API calls.", - }, - }, - }, - } -} - -func getAssumeRoleARN(tfMap map[string]any) string { - if tfMap == nil { - return "" - } - if v, ok := tfMap["role_arn"].(string); ok && v != "" { - return v - } - return "" -} - -func isGovBaseURLConfiguredForSDK2Provider(d *schema.ResourceData) bool { - return isGovBaseURLConfigured(d.Get("base_url").(string)) } diff --git a/internal/service/eventtrigger/data_source_event_trigger.go b/internal/service/eventtrigger/data_source_event_trigger.go index 5cf8c318ff..bac40a4d0a 100644 --- a/internal/service/eventtrigger/data_source_event_trigger.go +++ b/internal/service/eventtrigger/data_source_event_trigger.go @@ -133,7 +133,7 @@ func DataSource() *schema.Resource { } func dataSourceMongoDBAtlasEventTriggerRead(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { - conn, err := meta.(*config.MongoDBClient).GetRealmClient(ctx) + conn, err := meta.(*config.MongoDBClient).Realm.Get(ctx) if err != nil { return diag.FromErr(err) } diff --git a/internal/service/eventtrigger/data_source_event_triggers.go b/internal/service/eventtrigger/data_source_event_triggers.go index 120b42bdca..02295a331d 100644 --- a/internal/service/eventtrigger/data_source_event_triggers.go +++ b/internal/service/eventtrigger/data_source_event_triggers.go @@ -144,9 +144,8 @@ func PluralDataSource() *schema.Resource { } func dataSourceMongoDBAtlasEventTriggersRead(d *schema.ResourceData, meta any) error { - // Get client 
connection. ctx := context.Background() - conn, err := meta.(*config.MongoDBClient).GetRealmClient(ctx) + conn, err := meta.(*config.MongoDBClient).Realm.Get(ctx) if err != nil { return err } diff --git a/internal/service/eventtrigger/resource_event_trigger.go b/internal/service/eventtrigger/resource_event_trigger.go index b1e99aee41..1a90c8f3ab 100644 --- a/internal/service/eventtrigger/resource_event_trigger.go +++ b/internal/service/eventtrigger/resource_event_trigger.go @@ -210,7 +210,7 @@ func Resource() *schema.Resource { } func resourceMongoDBAtlasEventTriggersCreate(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { - conn, err := meta.(*config.MongoDBClient).GetRealmClient(ctx) + conn, err := meta.(*config.MongoDBClient).Realm.Get(ctx) if err != nil { return diag.FromErr(err) } @@ -312,7 +312,7 @@ func resourceMongoDBAtlasEventTriggersCreate(ctx context.Context, d *schema.Reso } func resourceMongoDBAtlasEventTriggersRead(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { - conn, err := meta.(*config.MongoDBClient).GetRealmClient(ctx) + conn, err := meta.(*config.MongoDBClient).Realm.Get(ctx) if err != nil { return diag.FromErr(err) } @@ -402,7 +402,7 @@ func resourceMongoDBAtlasEventTriggersRead(ctx context.Context, d *schema.Resour } func resourceMongoDBAtlasEventTriggersUpdate(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { - conn, err := meta.(*config.MongoDBClient).GetRealmClient(ctx) + conn, err := meta.(*config.MongoDBClient).Realm.Get(ctx) if err != nil { return diag.FromErr(err) } @@ -453,8 +453,7 @@ func resourceMongoDBAtlasEventTriggersUpdate(ctx context.Context, d *schema.Reso } func resourceMongoDBAtlasEventTriggersDelete(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { - // Get the client connection. 
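
These call sites switch from GetRealmClient(ctx) to the Realm.Get(ctx) accessor on MongoDBClient. A rough, dependency-free sketch of that accessor shape follows; the realmAPI type and the fake token are illustrative only and do not reflect the real go.mongodb.org/realm client:

```go
package main

import (
	"context"
	"fmt"
)

// realmClient holds only the inputs needed to build an authenticated client;
// the credential exchange happens on each Get call, not at provider startup.
type realmClient struct {
	publicKey, privateKey, baseURL string
}

// realmAPI is a placeholder for the authenticated client returned by Get.
type realmAPI struct{ baseURL, token string }

func (r *realmClient) Get(ctx context.Context) (*realmAPI, error) {
	// The real accessor exchanges the API keys for a token against the Realm
	// auth endpoint; the token below is faked to keep the sketch runnable.
	if r.publicKey == "" || r.privateKey == "" {
		return nil, fmt.Errorf("realm credentials are not configured")
	}
	return &realmAPI{baseURL: r.baseURL, token: "fake-token-for-" + r.publicKey}, nil
}

func main() {
	c := &realmClient{publicKey: "pub", privateKey: "priv", baseURL: "https://realm.mongodb.com"}
	api, err := c.Get(context.Background())
	fmt.Println(api, err)
}
```
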
- conn, err := meta.(*config.MongoDBClient).GetRealmClient(ctx) + conn, err := meta.(*config.MongoDBClient).Realm.Get(ctx) if err != nil { return diag.FromErr(err) } @@ -515,7 +514,7 @@ func flattenTriggerEventProcessorAWSEventBridge(eventProcessor map[string]any) [ } func resourceMongoDBAtlasEventTriggerImportState(ctx context.Context, d *schema.ResourceData, meta any) ([]*schema.ResourceData, error) { - conn, err := meta.(*config.MongoDBClient).GetRealmClient(ctx) + conn, err := meta.(*config.MongoDBClient).Realm.Get(ctx) if err != nil { return nil, err } diff --git a/internal/service/eventtrigger/resource_event_trigger_test.go b/internal/service/eventtrigger/resource_event_trigger_test.go index 30f2c2734d..865690c6a7 100644 --- a/internal/service/eventtrigger/resource_event_trigger_test.go +++ b/internal/service/eventtrigger/resource_event_trigger_test.go @@ -484,7 +484,7 @@ func TestAccEventTrigger_functionBasic(t *testing.T) { func checkExists(resourceName string) resource.TestCheckFunc { return func(s *terraform.State) error { ctx := context.Background() - conn, err := acc.MongoDBClient.GetRealmClient(ctx) + conn, err := acc.MongoDBClient.Realm.Get(ctx) if err != nil { return err } @@ -513,7 +513,7 @@ func checkExists(resourceName string) resource.TestCheckFunc { func checkDestroy(s *terraform.State) error { ctx := context.Background() - conn, err := acc.MongoDBClient.GetRealmClient(ctx) + conn, err := acc.MongoDBClient.Realm.Get(ctx) if err != nil { return err } diff --git a/internal/service/organization/resource_organization.go b/internal/service/organization/resource_organization.go index d8cf0c5080..3cd342f7f0 100644 --- a/internal/service/organization/resource_organization.go +++ b/internal/service/organization/resource_organization.go @@ -113,7 +113,7 @@ func resourceCreate(ctx context.Context, d *schema.ResourceData, meta any) diag. if err := ValidateAPIKeyIsOrgOwner(conversion.ExpandStringList(d.Get("role_names").(*schema.Set).List())); err != nil { return diag.FromErr(err) } - conn := getAtlasV2Connection(ctx, d, meta) // Using provider credentials. + conn := getAtlasV2Connection(d, meta) // Using provider credentials. organization, resp, err := conn.OrganizationsApi.CreateOrg(ctx, newCreateOrganizationRequest(d)).Execute() if err != nil { if validate.StatusNotFound(resp) && !strings.Contains(err.Error(), "USER_NOT_FOUND") { @@ -128,7 +128,7 @@ func resourceCreate(ctx context.Context, d *schema.ResourceData, meta any) diag. if err := d.Set("public_key", organization.ApiKey.GetPublicKey()); err != nil { return diag.FromErr(fmt.Errorf("error setting `public_key`: %s", err)) } - conn = getAtlasV2Connection(ctx, d, meta) // Using new credentials from the created organization. + conn = getAtlasV2Connection(d, meta) // Using new credentials from the created organization. orgID := organization.Organization.GetId() _, _, errUpdate := conn.OrganizationsApi.UpdateOrgSettings(ctx, orgID, newOrganizationSettings(d)).Execute() if errUpdate != nil { @@ -146,7 +146,7 @@ func resourceCreate(ctx context.Context, d *schema.ResourceData, meta any) diag. 
} func resourceRead(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { - conn := getAtlasV2Connection(ctx, d, meta) + conn := getAtlasV2Connection(d, meta) ids := conversion.DecodeStateID(d.Id()) orgID := ids["org_id"] @@ -194,7 +194,7 @@ func resourceRead(ctx context.Context, d *schema.ResourceData, meta any) diag.Di } func resourceUpdate(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { - conn := getAtlasV2Connection(ctx, d, meta) + conn := getAtlasV2Connection(d, meta) ids := conversion.DecodeStateID(d.Id()) orgID := ids["org_id"] for _, attr := range attrsCreateOnly { @@ -227,7 +227,7 @@ func resourceUpdate(ctx context.Context, d *schema.ResourceData, meta any) diag. } func resourceDelete(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { - conn := getAtlasV2Connection(ctx, d, meta) + conn := getAtlasV2Connection(d, meta) ids := conversion.DecodeStateID(d.Id()) orgID := ids["org_id"] @@ -293,18 +293,21 @@ func ValidateAPIKeyIsOrgOwner(roles []string) error { // getAtlasV2Connection uses the created credentials for the organization if they exist. // Otherwise, it uses the provider credentials, e.g. if the resource was imported. -func getAtlasV2Connection(ctx context.Context, d *schema.ResourceData, meta any) *admin.APIClient { +func getAtlasV2Connection(d *schema.ResourceData, meta any) *admin.APIClient { + currentClient := meta.(*config.MongoDBClient) publicKey := d.Get("public_key").(string) privateKey := d.Get("private_key").(string) if publicKey == "" || privateKey == "" { - return meta.(*config.MongoDBClient).AtlasV2 + return currentClient.AtlasV2 } - cfg := config.Config{ - PublicKey: publicKey, - PrivateKey: privateKey, - BaseURL: meta.(*config.MongoDBClient).Config.BaseURL, - TerraformVersion: meta.(*config.MongoDBClient).Config.TerraformVersion, + c := &config.Credentials{ + PublicKey: publicKey, + PrivateKey: privateKey, + BaseURL: currentClient.BaseURL, } - clients, _ := cfg.NewClient(ctx) - return clients.AtlasV2 + newClient, err := config.NewClient(c, currentClient.TerraformVersion) + if err != nil { + return currentClient.AtlasV2 + } + return newClient.AtlasV2 } diff --git a/internal/service/organization/resource_organization_test.go b/internal/service/organization/resource_organization_test.go index c862b6e672..7402c219c0 100644 --- a/internal/service/organization/resource_organization_test.go +++ b/internal/service/organization/resource_organization_test.go @@ -427,18 +427,16 @@ func getTestClientWithNewOrgCreds(rs *terraform.ResourceState) (*admin.APIClient if rs.Primary.Attributes["public_key"] == "" { return nil, fmt.Errorf("no public_key is set") } - if rs.Primary.Attributes["private_key"] == "" { return nil, fmt.Errorf("no private_key is set") } - - cfg := config.Config{ + c := &config.Credentials{ PublicKey: rs.Primary.Attributes["public_key"], PrivateKey: rs.Primary.Attributes["private_key"], - BaseURL: acc.MongoDBClient.Config.BaseURL, + BaseURL: acc.MongoDBClient.BaseURL, } - clients, _ := cfg.NewClient(context.Background()) - return clients.AtlasV2, nil + client, _ := config.NewClient(c, acc.MongoDBClient.TerraformVersion) + return client.AtlasV2, nil } func TestValidateAPIKeyIsOrgOwner(t *testing.T) { diff --git a/internal/testutil/acc/factory.go b/internal/testutil/acc/factory.go index 29f572540c..e654ec914c 100644 --- a/internal/testutil/acc/factory.go +++ b/internal/testutil/acc/factory.go @@ -1,7 +1,6 @@ package acc import ( - "context" "os" matlas "go.mongodb.org/atlas/mongodbatlas" @@ 
-42,12 +41,12 @@ func ConnV220241113() *admin20241113.APIClient { } func ConnV2UsingGov() *admin.APIClient { - cfg := config.Config{ + c := &config.Credentials{ PublicKey: os.Getenv("MONGODB_ATLAS_GOV_PUBLIC_KEY"), PrivateKey: os.Getenv("MONGODB_ATLAS_GOV_PRIVATE_KEY"), BaseURL: os.Getenv("MONGODB_ATLAS_GOV_BASE_URL"), } - client, _ := cfg.NewClient(context.Background()) + client, _ := config.NewClient(c, "") return client.AtlasV2 } @@ -57,7 +56,7 @@ func init() { return provider.MuxProviderFactory()(), nil }, } - cfg := config.Config{ + c := &config.Credentials{ PublicKey: os.Getenv("MONGODB_ATLAS_PUBLIC_KEY"), PrivateKey: os.Getenv("MONGODB_ATLAS_PRIVATE_KEY"), ClientID: os.Getenv("MONGODB_ATLAS_CLIENT_ID"), @@ -65,5 +64,5 @@ func init() { BaseURL: os.Getenv("MONGODB_ATLAS_BASE_URL"), RealmBaseURL: os.Getenv("MONGODB_REALM_BASE_URL"), } - MongoDBClient, _ = cfg.NewClient(context.Background()) + MongoDBClient, _ = config.NewClient(c, "") } diff --git a/internal/testutil/acc/pre_check.go b/internal/testutil/acc/pre_check.go index 8d98c621b5..0bc99d38b7 100644 --- a/internal/testutil/acc/pre_check.go +++ b/internal/testutil/acc/pre_check.go @@ -347,7 +347,7 @@ func PreCheckAwsMsk(tb testing.TB) { func PreCheckAccessToken(tb testing.TB) { tb.Helper() - if os.Getenv("MONGODB_ATLAS_OAUTH_TOKEN") == "" { - tb.Fatal("`MONGODB_ATLAS_OAUTH_TOKEN` must be set for Atlas Access Token acceptance testing") + if os.Getenv("MONGODB_ATLAS_ACCESS_TOKEN") == "" { + tb.Fatal("`MONGODB_ATLAS_ACCESS_TOKEN` must be set for Atlas Access Token acceptance testing") } } From 8aa6d8aaab70d9761bf4466f2c008963b747bffb Mon Sep 17 00:00:00 2001 From: Leo Antoli <430982+lantoli@users.noreply.github.com> Date: Mon, 6 Oct 2025 22:57:09 +0200 Subject: [PATCH 7/8] chore: Remove unneeded Atlas versions (#3752) * remove Atlas version 20240805 * remove admin20240530 version from resources except cluster * NormalizeBaseURL --- go.mod | 3 +- go.sum | 2 - internal/config/client.go | 32 +-- internal/config/service_account.go | 6 +- internal/service/advancedcluster/common.go | 2 - .../common_model_sdk_version_conversion.go | 256 ------------------ ...ommon_model_sdk_version_conversion_test.go | 193 ------------- .../service/advancedcluster/resource_test.go | 44 ++- ...ce_cloud_backup_schedule_migration_test.go | 8 +- .../resource_cloud_backup_schedule_test.go | 36 +-- .../testutil/acc/independent_shard_scaling.go | 5 +- 11 files changed, 56 insertions(+), 531 deletions(-) delete mode 100644 internal/service/advancedcluster/common_model_sdk_version_conversion.go delete mode 100644 internal/service/advancedcluster/common_model_sdk_version_conversion_test.go diff --git a/go.mod b/go.mod index 65919a5b6f..94a45aca3f 100644 --- a/go.mod +++ b/go.mod @@ -33,7 +33,6 @@ require ( github.com/zclconf/go-cty v1.17.0 go.mongodb.org/atlas v0.38.0 go.mongodb.org/atlas-sdk/v20240530005 v20240530005.0.0 - go.mongodb.org/atlas-sdk/v20240805005 v20240805005.0.1-0.20250402112219-2468c5354718 // uses api-bot-update-v20240805-backport-cluster to support AdvancedConfiguration in create/updateCluster APIs go.mongodb.org/atlas-sdk/v20241113005 v20241113005.0.0 go.mongodb.org/realm v0.1.0 gopkg.in/yaml.v3 v3.0.1 @@ -43,6 +42,7 @@ require ( github.com/hashicorp/terraform-json v0.27.2 github.com/hashicorp/terraform-plugin-framework-jsontypes v0.2.0 go.mongodb.org/atlas-sdk/v20250312007 v20250312007.0.0 + golang.org/x/oauth2 v0.31.0 ) require ( @@ -163,7 +163,6 @@ require ( golang.org/x/crypto v0.42.0 // indirect golang.org/x/mod v0.27.0 // indirect 
golang.org/x/net v0.43.0 // indirect - golang.org/x/oauth2 v0.31.0 // indirect golang.org/x/sync v0.17.0 // indirect golang.org/x/sys v0.36.0 // indirect golang.org/x/text v0.29.0 // indirect diff --git a/go.sum b/go.sum index b3fb988a3c..cee9c557df 100644 --- a/go.sum +++ b/go.sum @@ -1364,8 +1364,6 @@ go.mongodb.org/atlas v0.38.0 h1:zfwymq20GqivGwxPZfypfUDry+WwMGVui97z1d8V4bU= go.mongodb.org/atlas v0.38.0/go.mod h1:DJYtM+vsEpPEMSkQzJnFHrT0sP7ev6cseZc/GGjJYG8= go.mongodb.org/atlas-sdk/v20240530005 v20240530005.0.0 h1:d/gbYJ+obR0EM/3DZf7+ZMi2QWISegm3mid7Or708cc= go.mongodb.org/atlas-sdk/v20240530005 v20240530005.0.0/go.mod h1:O47ZrMMfcWb31wznNIq2PQkkdoFoK0ea2GlmRqGJC2s= -go.mongodb.org/atlas-sdk/v20240805005 v20240805005.0.1-0.20250402112219-2468c5354718 h1:M2mNSBdTkP+paQ1qZ6FliiPdTEbDR9m9qvv4vsWoJAw= -go.mongodb.org/atlas-sdk/v20240805005 v20240805005.0.1-0.20250402112219-2468c5354718/go.mod h1:PeByRxdvzfvz7xhG5vDn60j836EoduWqTqs76okUc9c= go.mongodb.org/atlas-sdk/v20241113005 v20241113005.0.0 h1:aaU2E4rtzYXuEDxv9MoSON2gOEAA9M2gsDf2CqjcGj8= go.mongodb.org/atlas-sdk/v20241113005 v20241113005.0.0/go.mod h1:eV9REWR36iVMrpZUAMZ5qPbXEatoVfmzwT+Ue8yqU+U= go.mongodb.org/atlas-sdk/v20250312007 v20250312007.0.0 h1:2k6eXWQzTpbc/maZotRIoyXq3l/pbCF1RBMt+WnuB0I= diff --git a/internal/config/client.go b/internal/config/client.go index 574eb579a0..c6245ed1cb 100644 --- a/internal/config/client.go +++ b/internal/config/client.go @@ -11,7 +11,6 @@ import ( "time" admin20240530 "go.mongodb.org/atlas-sdk/v20240530005/admin" - admin20240805 "go.mongodb.org/atlas-sdk/v20240805005/admin" admin20241113 "go.mongodb.org/atlas-sdk/v20241113005/admin" "go.mongodb.org/atlas-sdk/v20250312007/admin" matlasClient "go.mongodb.org/atlas/mongodbatlas" @@ -71,17 +70,16 @@ func tfLoggingInterceptor(base http.RoundTripper) http.RoundTripper { return logging.NewTransport("Atlas", base) } -// MongoDBClient contains the mongodbatlas clients and configurations +// MongoDBClient contains the mongodbatlas clients and configurations. type MongoDBClient struct { Atlas *matlasClient.Client AtlasV2 *admin.APIClient AtlasPreview *adminpreview.APIClient - AtlasV220240805 *admin20240805.APIClient // used in advanced_cluster to avoid adopting 2024-10-23 release with ISS autoscaling - AtlasV220240530 *admin20240530.APIClient // used in advanced_cluster and cloud_backup_schedule for avoiding breaking changes (supporting deprecated replication_specs.id) - AtlasV220241113 *admin20241113.APIClient // used in teams and atlas_users to avoiding breaking changes + AtlasV220240530 *admin20240530.APIClient // Used in cluster to support deprecated attributes default_read_concern and fail_index_key_too_long in advanced_configuration. + AtlasV220241113 *admin20241113.APIClient // Used in teams and atlas_users to avoiding breaking changes. Realm *RealmClient - BaseURL string // needed by organization resource - TerraformVersion string // needed by organization resource + BaseURL string // Needed by organization resource. + TerraformVersion string // Needed by organization resource. 
} type RealmClient struct { @@ -121,10 +119,6 @@ func NewClient(c *Credentials, terraformVersion string) (*MongoDBClient, error) if err != nil { return nil, err } - sdkV220240805Client, err := newSDKV220240805Client(client, c.BaseURL, userAgent) - if err != nil { - return nil, err - } sdkV220241113Client, err := newSDKV220241113Client(client, c.BaseURL, userAgent) if err != nil { return nil, err @@ -135,14 +129,13 @@ func NewClient(c *Credentials, terraformVersion string) (*MongoDBClient, error) AtlasV2: sdkV2Client, AtlasPreview: sdkPreviewClient, AtlasV220240530: sdkV220240530Client, - AtlasV220240805: sdkV220240805Client, AtlasV220241113: sdkV220241113Client, BaseURL: c.BaseURL, TerraformVersion: terraformVersion, Realm: &RealmClient{ publicKey: c.PublicKey, privateKey: c.PrivateKey, - realmBaseURL: c.RealmBaseURL, + realmBaseURL: NormalizeBaseURL(c.RealmBaseURL), terraformVersion: terraformVersion, }, } @@ -204,15 +197,6 @@ func newSDKV220240530Client(client *http.Client, baseURL, userAgent string) (*ad ) } -func newSDKV220240805Client(client *http.Client, baseURL, userAgent string) (*admin20240805.APIClient, error) { - return admin20240805.NewClient( - admin20240805.UseHTTPClient(client), - admin20240805.UseUserAgent(userAgent), - admin20240805.UseBaseURL(baseURL), - admin20240805.UseDebug(false), - ) -} - func newSDKV220241113Client(client *http.Client, baseURL, userAgent string) (*admin20241113.APIClient, error) { return admin20241113.NewClient( admin20241113.UseHTTPClient(client), @@ -234,9 +218,9 @@ func (r *RealmClient) Get(ctx context.Context) (*realm.Client, error) { authConfig := realmAuth.NewConfig(nil) if r.realmBaseURL != "" { - adminURL := r.realmBaseURL + "api/admin/v3.0/" + adminURL := r.realmBaseURL + "/api/admin/v3.0/" optsRealm = append(optsRealm, realm.SetBaseURL(adminURL)) - authConfig.AuthURL, _ = url.Parse(adminURL + "auth/providers/mongodb-cloud/login") + authConfig.AuthURL, _ = url.Parse(adminURL + "/auth/providers/mongodb-cloud/login") } token, err := authConfig.NewTokenFromCredentials(ctx, r.publicKey, r.privateKey) diff --git a/internal/config/service_account.go b/internal/config/service_account.go index 84b41cc64e..a2f9d9f63e 100644 --- a/internal/config/service_account.go +++ b/internal/config/service_account.go @@ -28,7 +28,7 @@ func getTokenSource(clientID, clientSecret, baseURL string, tokenRenewalBase htt saInfo.mu.Lock() defer saInfo.mu.Unlock() - baseURL = strings.TrimRight(baseURL, "/") + baseURL = NormalizeBaseURL(baseURL) if saInfo.tokenSource != nil { // Token source in cache. 
if saInfo.clientID != clientID || saInfo.clientSecret != clientSecret || saInfo.baseURL != baseURL { return nil, fmt.Errorf("service account credentials changed") @@ -53,3 +53,7 @@ func getTokenSource(clientID, clientSecret, baseURL string, tokenRenewalBase htt saInfo.tokenSource = tokenSource return saInfo.tokenSource, nil } + +func NormalizeBaseURL(baseURL string) string { + return strings.TrimRight(baseURL, "/") +} diff --git a/internal/service/advancedcluster/common.go b/internal/service/advancedcluster/common.go index 86922dc931..ce95923485 100644 --- a/internal/service/advancedcluster/common.go +++ b/internal/service/advancedcluster/common.go @@ -6,7 +6,6 @@ import ( "strings" "time" - admin20240530 "go.mongodb.org/atlas-sdk/v20240530005/admin" "go.mongodb.org/atlas-sdk/v20250312007/admin" "github.com/hashicorp/terraform-plugin-framework/diag" @@ -27,7 +26,6 @@ var ( ) type ProcessArgs struct { - ArgsLegacy *admin20240530.ClusterDescriptionProcessArgs ArgsDefault *admin.ClusterDescriptionProcessArgs20240805 ClusterAdvancedConfig *admin.ApiAtlasClusterAdvancedConfiguration } diff --git a/internal/service/advancedcluster/common_model_sdk_version_conversion.go b/internal/service/advancedcluster/common_model_sdk_version_conversion.go deleted file mode 100644 index 137e755e77..0000000000 --- a/internal/service/advancedcluster/common_model_sdk_version_conversion.go +++ /dev/null @@ -1,256 +0,0 @@ -package advancedcluster - -import ( - admin20240530 "go.mongodb.org/atlas-sdk/v20240530005/admin" - admin20240805 "go.mongodb.org/atlas-sdk/v20240805005/admin" - "go.mongodb.org/atlas-sdk/v20250312007/admin" -) - -// Conversions from one SDK model version to another are used to avoid duplicating our flatten/expand conversion functions. -// - These functions must not contain any business logic. -// - All will be removed once we rely on a single API version. 
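
The NormalizeBaseURL helper introduced above serves both the Realm URL joining and the Service Account token cache. A self-contained sketch of that combination, with a plain string standing in for the real OAuth2 token source: normalize once, cache one token source per process, and refuse a silent switch of credentials.

```go
package main

import (
	"fmt"
	"strings"
	"sync"
)

// normalizeBaseURL mirrors config.NormalizeBaseURL: trailing slashes are
// stripped so cache keys and joined URLs stay consistent.
func normalizeBaseURL(baseURL string) string {
	return strings.TrimRight(baseURL, "/")
}

// tokenCache is a dependency-free stand-in for the saInfo cache: one token
// source per process, keyed on the client credentials and normalized base URL.
type tokenCache struct {
	mu                              sync.Mutex
	clientID, clientSecret, baseURL string
	token                           string
}

func (c *tokenCache) get(clientID, clientSecret, baseURL string) (string, error) {
	c.mu.Lock()
	defer c.mu.Unlock()
	baseURL = normalizeBaseURL(baseURL)
	if c.token != "" {
		if c.clientID != clientID || c.clientSecret != clientSecret || c.baseURL != baseURL {
			return "", fmt.Errorf("service account credentials changed")
		}
		return c.token, nil // reuse the cached token source
	}
	// The real implementation builds an OAuth2 client-credentials token source
	// against baseURL; the string below is faked to keep the sketch runnable.
	c.clientID, c.clientSecret, c.baseURL = clientID, clientSecret, baseURL
	c.token = "token-for-" + clientID + "@" + baseURL
	return c.token, nil
}

func main() {
	cache := &tokenCache{}
	t1, _ := cache.get("id", "secret", "https://cloud.mongodb.com/")
	t2, _ := cache.get("id", "secret", "https://cloud.mongodb.com") // same key after normalization
	_, err := cache.get("other-id", "secret", "https://cloud.mongodb.com")
	fmt.Println(t1 == t2, err) // prints: true service account credentials changed
}
```
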
- -func ConvertClusterDescription20241023to20240805(clusterDescription *admin.ClusterDescription20240805) *admin20240805.ClusterDescription20240805 { - return &admin20240805.ClusterDescription20240805{ - Name: clusterDescription.Name, - ClusterType: clusterDescription.ClusterType, - ReplicationSpecs: convertReplicationSpecs20241023to20240805(clusterDescription.ReplicationSpecs), - BackupEnabled: clusterDescription.BackupEnabled, - BiConnector: convertBiConnector20241023to20240805(clusterDescription.BiConnector), - EncryptionAtRestProvider: clusterDescription.EncryptionAtRestProvider, - Labels: convertLabels20241023to20240805(clusterDescription.Labels), - Tags: convertTag20241023to20240805(clusterDescription.Tags), - MongoDBMajorVersion: clusterDescription.MongoDBMajorVersion, - PitEnabled: clusterDescription.PitEnabled, - RootCertType: clusterDescription.RootCertType, - TerminationProtectionEnabled: clusterDescription.TerminationProtectionEnabled, - VersionReleaseSystem: clusterDescription.VersionReleaseSystem, - GlobalClusterSelfManagedSharding: clusterDescription.GlobalClusterSelfManagedSharding, - ReplicaSetScalingStrategy: clusterDescription.ReplicaSetScalingStrategy, - RedactClientLogData: clusterDescription.RedactClientLogData, - ConfigServerManagementMode: clusterDescription.ConfigServerManagementMode, - AdvancedConfiguration: convertAdvancedConfiguration20250312to20240805(clusterDescription.AdvancedConfiguration), - } -} - -func convertReplicationSpecs20241023to20240805(replicationSpecs *[]admin.ReplicationSpec20240805) *[]admin20240805.ReplicationSpec20240805 { - if replicationSpecs == nil { - return nil - } - result := make([]admin20240805.ReplicationSpec20240805, len(*replicationSpecs)) - for i, replicationSpec := range *replicationSpecs { - result[i] = admin20240805.ReplicationSpec20240805{ - Id: replicationSpec.Id, - ZoneName: replicationSpec.ZoneName, - ZoneId: replicationSpec.ZoneId, - RegionConfigs: convertCloudRegionConfig20241023to20240805(replicationSpec.RegionConfigs), - } - } - return &result -} - -func convertCloudRegionConfig20241023to20240805(cloudRegionConfig *[]admin.CloudRegionConfig20240805) *[]admin20240805.CloudRegionConfig20240805 { - if cloudRegionConfig == nil { - return nil - } - result := make([]admin20240805.CloudRegionConfig20240805, len(*cloudRegionConfig)) - for i, regionConfig := range *cloudRegionConfig { - result[i] = admin20240805.CloudRegionConfig20240805{ - ProviderName: regionConfig.ProviderName, - RegionName: regionConfig.RegionName, - BackingProviderName: regionConfig.BackingProviderName, - Priority: regionConfig.Priority, - ElectableSpecs: convertHardwareSpec20241023to20240805(regionConfig.ElectableSpecs), - ReadOnlySpecs: convertDedicatedHardwareSpec20241023to20240805(regionConfig.ReadOnlySpecs), - AnalyticsSpecs: convertDedicatedHardwareSpec20241023to20240805(regionConfig.AnalyticsSpecs), - AutoScaling: convertAdvancedAutoScalingSettings20241023to20240805(regionConfig.AutoScaling), - AnalyticsAutoScaling: convertAdvancedAutoScalingSettings20241023to20240805(regionConfig.AnalyticsAutoScaling), - } - } - return &result -} - -func convertAdvancedAutoScalingSettings20241023to20240805(advancedAutoScalingSettings *admin.AdvancedAutoScalingSettings) *admin20240805.AdvancedAutoScalingSettings { - if advancedAutoScalingSettings == nil { - return nil - } - return &admin20240805.AdvancedAutoScalingSettings{ - Compute: convertAdvancedComputeAutoScaling20241023to20240805(advancedAutoScalingSettings.Compute), - DiskGB: 
convertDiskGBAutoScaling20241023to20240805(advancedAutoScalingSettings.DiskGB), - } -} - -func convertDiskGBAutoScaling20241023to20240805(diskGBAutoScaling *admin.DiskGBAutoScaling) *admin20240805.DiskGBAutoScaling { - if diskGBAutoScaling == nil { - return nil - } - return &admin20240805.DiskGBAutoScaling{ - Enabled: diskGBAutoScaling.Enabled, - } -} - -func convertAdvancedComputeAutoScaling20241023to20240805(advancedComputeAutoScaling *admin.AdvancedComputeAutoScaling) *admin20240805.AdvancedComputeAutoScaling { - if advancedComputeAutoScaling == nil { - return nil - } - return &admin20240805.AdvancedComputeAutoScaling{ - Enabled: advancedComputeAutoScaling.Enabled, - MaxInstanceSize: advancedComputeAutoScaling.MaxInstanceSize, - MinInstanceSize: advancedComputeAutoScaling.MinInstanceSize, - ScaleDownEnabled: advancedComputeAutoScaling.ScaleDownEnabled, - } -} - -func convertHardwareSpec20241023to20240805(hardwareSpec *admin.HardwareSpec20240805) *admin20240805.HardwareSpec20240805 { - if hardwareSpec == nil { - return nil - } - return &admin20240805.HardwareSpec20240805{ - DiskSizeGB: hardwareSpec.DiskSizeGB, - NodeCount: hardwareSpec.NodeCount, - DiskIOPS: hardwareSpec.DiskIOPS, - EbsVolumeType: hardwareSpec.EbsVolumeType, - InstanceSize: hardwareSpec.InstanceSize, - } -} - -func convertDedicatedHardwareSpec20241023to20240805(hardwareSpec *admin.DedicatedHardwareSpec20240805) *admin20240805.DedicatedHardwareSpec20240805 { - if hardwareSpec == nil { - return nil - } - return &admin20240805.DedicatedHardwareSpec20240805{ - DiskSizeGB: hardwareSpec.DiskSizeGB, - NodeCount: hardwareSpec.NodeCount, - DiskIOPS: hardwareSpec.DiskIOPS, - EbsVolumeType: hardwareSpec.EbsVolumeType, - InstanceSize: hardwareSpec.InstanceSize, - } -} - -func convertBiConnector20241023to20240805(biConnector *admin.BiConnector) *admin20240805.BiConnector { - if biConnector == nil { - return nil - } - return &admin20240805.BiConnector{ - ReadPreference: biConnector.ReadPreference, - Enabled: biConnector.Enabled, - } -} - -func convertAdvancedConfiguration20250312to20240805(advConfig *admin.ApiAtlasClusterAdvancedConfiguration) *admin20240805.ApiAtlasClusterAdvancedConfiguration { - if advConfig == nil { - return nil - } - - return &admin20240805.ApiAtlasClusterAdvancedConfiguration{ - MinimumEnabledTlsProtocol: advConfig.MinimumEnabledTlsProtocol, - CustomOpensslCipherConfigTls12: advConfig.CustomOpensslCipherConfigTls12, - TlsCipherConfigMode: advConfig.TlsCipherConfigMode, - } -} - -func convertLabels20241023to20240805(labels *[]admin.ComponentLabel) *[]admin20240805.ComponentLabel { - if labels == nil { - return nil - } - result := make([]admin20240805.ComponentLabel, len(*labels)) - for i, label := range *labels { - result[i] = admin20240805.ComponentLabel{ - Key: label.Key, - Value: label.Value, - } - } - return &result -} - -func convertTag20241023to20240805(tags *[]admin.ResourceTag) *[]admin20240805.ResourceTag { - if tags == nil { - return nil - } - result := make([]admin20240805.ResourceTag, len(*tags)) - for i, tag := range *tags { - result[i] = admin20240805.ResourceTag{ - Key: tag.Key, - Value: tag.Value, - } - } - return &result -} - -func ConvertRegionConfigSlice20241023to20240530(slice *[]admin.CloudRegionConfig20240805) *[]admin20240530.CloudRegionConfig { - if slice == nil { - return nil - } - cloudRegionSlice := *slice - results := make([]admin20240530.CloudRegionConfig, len(cloudRegionSlice)) - for i := range cloudRegionSlice { - cloudRegion := cloudRegionSlice[i] - results[i] = 
admin20240530.CloudRegionConfig{ - ElectableSpecs: convertHardwareSpec20241023to20240530(cloudRegion.ElectableSpecs), - Priority: cloudRegion.Priority, - ProviderName: cloudRegion.ProviderName, - RegionName: cloudRegion.RegionName, - AnalyticsAutoScaling: convertAdvancedAutoScalingSettings20241023to20240530(cloudRegion.AnalyticsAutoScaling), - AnalyticsSpecs: convertDedicatedHardwareSpec20241023to20240530(cloudRegion.AnalyticsSpecs), - AutoScaling: convertAdvancedAutoScalingSettings20241023to20240530(cloudRegion.AutoScaling), - ReadOnlySpecs: convertDedicatedHardwareSpec20241023to20240530(cloudRegion.ReadOnlySpecs), - BackingProviderName: cloudRegion.BackingProviderName, - } - } - return &results -} - -func convertHardwareSpec20241023to20240530(hwspec *admin.HardwareSpec20240805) *admin20240530.HardwareSpec { - if hwspec == nil { - return nil - } - return &admin20240530.HardwareSpec{ - DiskIOPS: hwspec.DiskIOPS, - EbsVolumeType: hwspec.EbsVolumeType, - InstanceSize: hwspec.InstanceSize, - NodeCount: hwspec.NodeCount, - } -} - -func convertAdvancedAutoScalingSettings20241023to20240530(settings *admin.AdvancedAutoScalingSettings) *admin20240530.AdvancedAutoScalingSettings { - if settings == nil { - return nil - } - return &admin20240530.AdvancedAutoScalingSettings{ - Compute: convertAdvancedComputeAutoScaling20241023to20240530(settings.Compute), - DiskGB: convertDiskGBAutoScaling20241023to20240530(settings.DiskGB), - } -} - -func convertAdvancedComputeAutoScaling20241023to20240530(settings *admin.AdvancedComputeAutoScaling) *admin20240530.AdvancedComputeAutoScaling { - if settings == nil { - return nil - } - return &admin20240530.AdvancedComputeAutoScaling{ - Enabled: settings.Enabled, - MaxInstanceSize: settings.MaxInstanceSize, - MinInstanceSize: settings.MinInstanceSize, - ScaleDownEnabled: settings.ScaleDownEnabled, - } -} - -func convertDiskGBAutoScaling20241023to20240530(settings *admin.DiskGBAutoScaling) *admin20240530.DiskGBAutoScaling { - if settings == nil { - return nil - } - return &admin20240530.DiskGBAutoScaling{ - Enabled: settings.Enabled, - } -} - -func convertDedicatedHardwareSpec20241023to20240530(spec *admin.DedicatedHardwareSpec20240805) *admin20240530.DedicatedHardwareSpec { - if spec == nil { - return nil - } - return &admin20240530.DedicatedHardwareSpec{ - NodeCount: spec.NodeCount, - DiskIOPS: spec.DiskIOPS, - EbsVolumeType: spec.EbsVolumeType, - InstanceSize: spec.InstanceSize, - } -} diff --git a/internal/service/advancedcluster/common_model_sdk_version_conversion_test.go b/internal/service/advancedcluster/common_model_sdk_version_conversion_test.go deleted file mode 100644 index 88fa992c63..0000000000 --- a/internal/service/advancedcluster/common_model_sdk_version_conversion_test.go +++ /dev/null @@ -1,193 +0,0 @@ -package advancedcluster_test - -import ( - "testing" - - "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/conversion" - "github.com/mongodb/terraform-provider-mongodbatlas/internal/service/advancedcluster" - "github.com/stretchr/testify/assert" - admin20240805 "go.mongodb.org/atlas-sdk/v20240805005/admin" - "go.mongodb.org/atlas-sdk/v20250312007/admin" -) - -func TestConvertClusterDescription20241023to20240805(t *testing.T) { - var ( - clusterName = "clusterName" - clusterType = "REPLICASET" - earProvider = "AWS" - booleanValue = true - mongoDBMajorVersion = "7.0" - rootCertType = "rootCertType" - replicaSetScalingStrategy = "WORKLOAD_TYPE" - configServerManagementMode = "ATLAS_MANAGED" - readPreference = "primary" - zoneName = "z1" - 
id = "id1" - regionConfigProvider = "AWS" - region = "EU_WEST_1" - priority = 7 - instanceSize = "M10" - nodeCount = 3 - diskSizeGB = 30.3 - ebsVolumeType = "STANDARD" - diskIOPS = 100 - ) - testCases := []struct { - input *admin.ClusterDescription20240805 - expectedOutput *admin20240805.ClusterDescription20240805 - name string - }{ - { - name: "Converts cluster description from 20241023 to 20240805", - input: &admin.ClusterDescription20240805{ - Name: conversion.StringPtr(clusterName), - ClusterType: conversion.StringPtr(clusterType), - ReplicationSpecs: &[]admin.ReplicationSpec20240805{ - { - Id: conversion.StringPtr(id), - ZoneName: conversion.StringPtr(zoneName), - RegionConfigs: &[]admin.CloudRegionConfig20240805{ - { - ProviderName: conversion.StringPtr(regionConfigProvider), - RegionName: conversion.StringPtr(region), - BackingProviderName: conversion.StringPtr(regionConfigProvider), - Priority: conversion.IntPtr(priority), - AnalyticsSpecs: &admin.DedicatedHardwareSpec20240805{ - InstanceSize: conversion.StringPtr(instanceSize), - NodeCount: conversion.IntPtr(nodeCount), - DiskSizeGB: conversion.Pointer(diskSizeGB), - EbsVolumeType: conversion.StringPtr(ebsVolumeType), - DiskIOPS: conversion.IntPtr(diskIOPS), - }, - ElectableSpecs: &admin.HardwareSpec20240805{ - InstanceSize: conversion.StringPtr(instanceSize), - NodeCount: conversion.IntPtr(nodeCount), - DiskSizeGB: conversion.Pointer(diskSizeGB), - EbsVolumeType: conversion.StringPtr(ebsVolumeType), - DiskIOPS: conversion.IntPtr(diskIOPS), - }, - AutoScaling: &admin.AdvancedAutoScalingSettings{ - Compute: &admin.AdvancedComputeAutoScaling{ - Enabled: conversion.Pointer(booleanValue), - MaxInstanceSize: conversion.Pointer(instanceSize), - MinInstanceSize: conversion.Pointer(instanceSize), - ScaleDownEnabled: conversion.Pointer(booleanValue), - }, - DiskGB: &admin.DiskGBAutoScaling{ - Enabled: conversion.Pointer(booleanValue), - }, - }, - }, - }, - }, - }, - BackupEnabled: conversion.Pointer(booleanValue), - BiConnector: &admin.BiConnector{ - Enabled: conversion.Pointer(booleanValue), - ReadPreference: conversion.StringPtr(readPreference), - }, - EncryptionAtRestProvider: conversion.StringPtr(earProvider), - Labels: &[]admin.ComponentLabel{ - {Key: conversion.StringPtr("key1"), Value: conversion.StringPtr("value1")}, - {Key: conversion.StringPtr("key2"), Value: conversion.StringPtr("value2")}, - }, - Tags: &[]admin.ResourceTag{ - {Key: "key1", Value: "value1"}, - {Key: "key2", Value: "value2"}, - }, - MongoDBMajorVersion: conversion.StringPtr(mongoDBMajorVersion), - PitEnabled: conversion.Pointer(booleanValue), - RootCertType: conversion.StringPtr(rootCertType), - TerminationProtectionEnabled: conversion.Pointer(booleanValue), - VersionReleaseSystem: conversion.StringPtr(""), - GlobalClusterSelfManagedSharding: conversion.Pointer(booleanValue), - ReplicaSetScalingStrategy: conversion.StringPtr(replicaSetScalingStrategy), - RedactClientLogData: conversion.Pointer(booleanValue), - ConfigServerManagementMode: conversion.StringPtr(configServerManagementMode), - }, - expectedOutput: &admin20240805.ClusterDescription20240805{ - Name: conversion.StringPtr(clusterName), - ClusterType: conversion.StringPtr(clusterType), - ReplicationSpecs: &[]admin20240805.ReplicationSpec20240805{ - { - Id: conversion.StringPtr(id), - ZoneName: conversion.StringPtr(zoneName), - RegionConfigs: &[]admin20240805.CloudRegionConfig20240805{ - { - ProviderName: conversion.StringPtr(regionConfigProvider), - RegionName: conversion.StringPtr(region), - 
BackingProviderName: conversion.StringPtr(regionConfigProvider), - Priority: conversion.IntPtr(priority), - AnalyticsSpecs: &admin20240805.DedicatedHardwareSpec20240805{ - InstanceSize: conversion.StringPtr(instanceSize), - NodeCount: conversion.IntPtr(nodeCount), - DiskSizeGB: conversion.Pointer(diskSizeGB), - EbsVolumeType: conversion.StringPtr(ebsVolumeType), - DiskIOPS: conversion.IntPtr(diskIOPS), - }, - ElectableSpecs: &admin20240805.HardwareSpec20240805{ - InstanceSize: conversion.StringPtr(instanceSize), - NodeCount: conversion.IntPtr(nodeCount), - DiskSizeGB: conversion.Pointer(diskSizeGB), - EbsVolumeType: conversion.StringPtr(ebsVolumeType), - DiskIOPS: conversion.IntPtr(diskIOPS), - }, - AutoScaling: &admin20240805.AdvancedAutoScalingSettings{ - Compute: &admin20240805.AdvancedComputeAutoScaling{ - Enabled: conversion.Pointer(booleanValue), - MaxInstanceSize: conversion.Pointer(instanceSize), - MinInstanceSize: conversion.Pointer(instanceSize), - ScaleDownEnabled: conversion.Pointer(booleanValue), - }, - DiskGB: &admin20240805.DiskGBAutoScaling{ - Enabled: conversion.Pointer(booleanValue), - }, - }, - }, - }, - }, - }, - BackupEnabled: conversion.Pointer(booleanValue), - BiConnector: &admin20240805.BiConnector{ - Enabled: conversion.Pointer(booleanValue), - ReadPreference: conversion.StringPtr(readPreference), - }, - EncryptionAtRestProvider: conversion.StringPtr(earProvider), - Labels: &[]admin20240805.ComponentLabel{ - {Key: conversion.StringPtr("key1"), Value: conversion.StringPtr("value1")}, - {Key: conversion.StringPtr("key2"), Value: conversion.StringPtr("value2")}, - }, - Tags: &[]admin20240805.ResourceTag{ - {Key: "key1", Value: "value1"}, - {Key: "key2", Value: "value2"}, - }, - MongoDBMajorVersion: conversion.StringPtr(mongoDBMajorVersion), - PitEnabled: conversion.Pointer(booleanValue), - RootCertType: conversion.StringPtr(rootCertType), - TerminationProtectionEnabled: conversion.Pointer(booleanValue), - VersionReleaseSystem: conversion.StringPtr(""), - GlobalClusterSelfManagedSharding: conversion.Pointer(booleanValue), - ReplicaSetScalingStrategy: conversion.StringPtr(replicaSetScalingStrategy), - RedactClientLogData: conversion.Pointer(booleanValue), - ConfigServerManagementMode: conversion.StringPtr(configServerManagementMode), - }, - }, - { - name: "Converts cluster description from 20241023 to 20240805 with nil values", - input: &admin.ClusterDescription20240805{}, - expectedOutput: &admin20240805.ClusterDescription20240805{ - ReplicationSpecs: nil, - BiConnector: nil, - Labels: nil, - Tags: nil, - }, - }, - } - - for _, tc := range testCases { - t.Run(tc.name, func(t *testing.T) { - result := advancedcluster.ConvertClusterDescription20241023to20240805(tc.input) - assert.Equal(t, tc.expectedOutput, result) - }) - } -} diff --git a/internal/service/advancedcluster/resource_test.go b/internal/service/advancedcluster/resource_test.go index 33379e5107..2d01f463c8 100644 --- a/internal/service/advancedcluster/resource_test.go +++ b/internal/service/advancedcluster/resource_test.go @@ -9,7 +9,6 @@ import ( "testing" "time" - admin20240530 "go.mongodb.org/atlas-sdk/v20240530005/admin" "go.mongodb.org/atlas-sdk/v20250312007/admin" "github.com/hashicorp/terraform-plugin-framework/diag" @@ -288,8 +287,9 @@ func TestAccClusterAdvancedCluster_pausedToUnpaused(t *testing.T) { func TestAccClusterAdvancedCluster_advancedConfig_oldMongoDBVersion(t *testing.T) { var ( projectID, clusterName = acc.ProjectIDExecutionWithCluster(t, 4) - - processArgs20240530 = 
&admin20240530.ClusterDescriptionProcessArgs{ + processArgs = &admin.ClusterDescriptionProcessArgs20240805{ + ChangeStreamOptionsPreAndPostImagesExpireAfterSeconds: conversion.IntPtr(-1), // this will not be set in the TF configuration + DefaultMaxTimeMS: conversion.IntPtr(65), DefaultWriteConcern: conversion.StringPtr("1"), JavascriptEnabled: conversion.Pointer(true), MinimumEnabledTlsProtocol: conversion.StringPtr("TLS1_2"), @@ -299,11 +299,6 @@ func TestAccClusterAdvancedCluster_advancedConfig_oldMongoDBVersion(t *testing.T SampleSizeBIConnector: conversion.Pointer(110), TransactionLifetimeLimitSeconds: conversion.Pointer[int64](300), } - processArgs = &admin.ClusterDescriptionProcessArgs20240805{ - ChangeStreamOptionsPreAndPostImagesExpireAfterSeconds: conversion.IntPtr(-1), // this will not be set in the TF configuration - DefaultMaxTimeMS: conversion.IntPtr(65), - } - processArgsCipherConfig = &admin.ClusterDescriptionProcessArgs20240805{ TlsCipherConfigMode: conversion.StringPtr("CUSTOM"), CustomOpensslCipherConfigTls12: &[]string{"TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256", "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384"}, @@ -316,11 +311,11 @@ func TestAccClusterAdvancedCluster_advancedConfig_oldMongoDBVersion(t *testing.T CheckDestroy: acc.CheckDestroyCluster, Steps: []resource.TestStep{ { - Config: configAdvanced(t, projectID, clusterName, "7.0", processArgs20240530, processArgs), + Config: configAdvanced(t, projectID, clusterName, "7.0", processArgs), ExpectError: regexp.MustCompile(errDefaultMaxTimeMinVersion), }, { - Config: configAdvanced(t, projectID, clusterName, "7.0", processArgs20240530, processArgsCipherConfig), + Config: configAdvanced(t, projectID, clusterName, "7.0", processArgsCipherConfig), Check: checkAdvanced(clusterName, "TLS1_2", processArgsCipherConfig), }, acc.TestStepImportCluster(resourceName), @@ -332,7 +327,7 @@ func TestAccClusterAdvancedCluster_advancedConfig(t *testing.T) { var ( projectID, clusterName = acc.ProjectIDExecutionWithCluster(t, 4) clusterNameUpdated = acc.RandomClusterName() - processArgs20240530 = &admin20240530.ClusterDescriptionProcessArgs{ + processArgs = &admin.ClusterDescriptionProcessArgs20240805{ DefaultWriteConcern: conversion.StringPtr("1"), JavascriptEnabled: conversion.Pointer(true), MinimumEnabledTlsProtocol: conversion.StringPtr("TLS1_2"), @@ -341,13 +336,10 @@ func TestAccClusterAdvancedCluster_advancedConfig(t *testing.T) { SampleRefreshIntervalBIConnector: conversion.Pointer(310), SampleSizeBIConnector: conversion.Pointer(110), TransactionLifetimeLimitSeconds: conversion.Pointer[int64](300), - } - processArgs = &admin.ClusterDescriptionProcessArgs20240805{ ChangeStreamOptionsPreAndPostImagesExpireAfterSeconds: conversion.IntPtr(-1), // this will not be set in the TF configuration TlsCipherConfigMode: conversion.StringPtr("DEFAULT"), } - - processArgs20240530Updated = &admin20240530.ClusterDescriptionProcessArgs{ + processArgsUpdated = &admin.ClusterDescriptionProcessArgs20240805{ DefaultWriteConcern: conversion.StringPtr("0"), JavascriptEnabled: conversion.Pointer(true), MinimumEnabledTlsProtocol: conversion.StringPtr("TLS1_2"), @@ -356,9 +348,7 @@ func TestAccClusterAdvancedCluster_advancedConfig(t *testing.T) { SampleRefreshIntervalBIConnector: conversion.Pointer(310), SampleSizeBIConnector: conversion.Pointer(110), TransactionLifetimeLimitSeconds: conversion.Pointer[int64](300), - } - processArgsUpdated = &admin.ClusterDescriptionProcessArgs20240805{ - DefaultMaxTimeMS: conversion.IntPtr(65), + DefaultMaxTimeMS: 
conversion.IntPtr(65), ChangeStreamOptionsPreAndPostImagesExpireAfterSeconds: conversion.IntPtr(100), TlsCipherConfigMode: conversion.StringPtr("CUSTOM"), CustomOpensslCipherConfigTls12: &[]string{"TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256", "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384"}, @@ -376,15 +366,15 @@ func TestAccClusterAdvancedCluster_advancedConfig(t *testing.T) { CheckDestroy: acc.CheckDestroyCluster, Steps: []resource.TestStep{ { - Config: configAdvanced(t, projectID, clusterName, "", processArgs20240530, processArgs), + Config: configAdvanced(t, projectID, clusterName, "", processArgs), Check: checkAdvanced(clusterName, "TLS1_2", processArgs), }, { - Config: configAdvanced(t, projectID, clusterNameUpdated, "", processArgs20240530Updated, processArgsUpdated), + Config: configAdvanced(t, projectID, clusterNameUpdated, "", processArgsUpdated), Check: checkAdvanced(clusterNameUpdated, "TLS1_2", processArgsUpdated), }, { - Config: configAdvanced(t, projectID, clusterNameUpdated, "", processArgs20240530Updated, processArgsUpdatedCipherConfig), + Config: configAdvanced(t, projectID, clusterNameUpdated, "", processArgsUpdatedCipherConfig), Check: checkAdvanced(clusterNameUpdated, "TLS1_2", processArgsUpdatedCipherConfig), }, acc.TestStepImportCluster(resourceName), @@ -396,7 +386,7 @@ func TestAccClusterAdvancedCluster_defaultWrite(t *testing.T) { var ( projectID, clusterName = acc.ProjectIDExecutionWithCluster(t, 4) clusterNameUpdated = acc.RandomClusterName() - processArgs = &admin20240530.ClusterDescriptionProcessArgs{ + processArgs = &admin.ClusterDescriptionProcessArgs20240805{ DefaultWriteConcern: conversion.StringPtr("1"), JavascriptEnabled: conversion.Pointer(true), MinimumEnabledTlsProtocol: conversion.StringPtr("TLS1_2"), @@ -405,7 +395,7 @@ func TestAccClusterAdvancedCluster_defaultWrite(t *testing.T) { SampleRefreshIntervalBIConnector: conversion.Pointer(310), SampleSizeBIConnector: conversion.Pointer(110), } - processArgsUpdated = &admin20240530.ClusterDescriptionProcessArgs{ + processArgsUpdated = &admin.ClusterDescriptionProcessArgs20240805{ DefaultWriteConcern: conversion.StringPtr("majority"), JavascriptEnabled: conversion.Pointer(true), MinimumEnabledTlsProtocol: conversion.StringPtr("TLS1_2"), @@ -1903,7 +1893,7 @@ func checkSingleProviderPaused(name string, paused bool) resource.TestCheckFunc "paused": strconv.FormatBool(paused)}) } -func configAdvanced(t *testing.T, projectID, clusterName, mongoDBMajorVersion string, p20240530 *admin20240530.ClusterDescriptionProcessArgs, p *admin.ClusterDescriptionProcessArgs20240805) string { +func configAdvanced(t *testing.T, projectID, clusterName, mongoDBMajorVersion string, p *admin.ClusterDescriptionProcessArgs20240805) string { t.Helper() changeStreamOptionsStr := "" defaultMaxTimeStr := "" @@ -1969,8 +1959,8 @@ func configAdvanced(t *testing.T, projectID, clusterName, mongoDBMajorVersion st %[14]s } } - `, projectID, clusterName, p20240530.GetJavascriptEnabled(), p20240530.GetMinimumEnabledTlsProtocol(), p20240530.GetNoTableScan(), - p20240530.GetOplogSizeMB(), p20240530.GetSampleSizeBIConnector(), p20240530.GetSampleRefreshIntervalBIConnector(), p20240530.GetTransactionLifetimeLimitSeconds(), + `, projectID, clusterName, p.GetJavascriptEnabled(), p.GetMinimumEnabledTlsProtocol(), p.GetNoTableScan(), + p.GetOplogSizeMB(), p.GetSampleSizeBIConnector(), p.GetSampleRefreshIntervalBIConnector(), p.GetTransactionLifetimeLimitSeconds(), changeStreamOptionsStr, defaultMaxTimeStr, mongoDBMajorVersionStr, tlsCipherConfigModeStr, 
customOpensslCipherConfigTLS12Str) + dataSourcesConfig } @@ -2013,7 +2003,7 @@ func checkAdvanced(name, tls string, processArgs *admin.ClusterDescriptionProces ) } -func configAdvancedDefaultWrite(t *testing.T, projectID, clusterName string, p *admin20240530.ClusterDescriptionProcessArgs) string { +func configAdvancedDefaultWrite(t *testing.T, projectID, clusterName string, p *admin.ClusterDescriptionProcessArgs20240805) string { t.Helper() return fmt.Sprintf(` resource "mongodbatlas_advanced_cluster" "test" { diff --git a/internal/service/cloudbackupschedule/resource_cloud_backup_schedule_migration_test.go b/internal/service/cloudbackupschedule/resource_cloud_backup_schedule_migration_test.go index 8475d75015..4c1aee7e60 100644 --- a/internal/service/cloudbackupschedule/resource_cloud_backup_schedule_migration_test.go +++ b/internal/service/cloudbackupschedule/resource_cloud_backup_schedule_migration_test.go @@ -4,7 +4,7 @@ import ( "os" "testing" - admin20240530 "go.mongodb.org/atlas-sdk/v20240530005/admin" + "go.mongodb.org/atlas-sdk/v20250312007/admin" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/plancheck" @@ -19,7 +19,7 @@ func TestMigBackupRSCloudBackupSchedule_basic(t *testing.T) { var ( clusterInfo = acc.GetClusterInfo(t, &acc.ClusterRequest{CloudBackup: true}) useYearly = mig.IsProviderVersionAtLeast("1.16.0") // attribute introduced in this version - config = configNewPolicies(&clusterInfo, &admin20240530.DiskBackupSnapshotSchedule{ + config = configNewPolicies(&clusterInfo, &admin.DiskBackupSnapshotSchedule20240805{ ReferenceHourOfDay: conversion.Pointer(0), ReferenceMinuteOfHour: conversion.Pointer(0), RestoreWindowDays: conversion.Pointer(7), @@ -65,12 +65,12 @@ func TestMigBackupRSCloudBackupSchedule_copySettings(t *testing.T) { terraformStr = clusterInfo.TerraformStr clusterResourceName = clusterInfo.ResourceName projectID = clusterInfo.ProjectID - copySettingsConfigWithRepSpecID = configCopySettings(terraformStr, projectID, clusterResourceName, false, true, &admin20240530.DiskBackupSnapshotSchedule{ + copySettingsConfigWithRepSpecID = configCopySettings(terraformStr, projectID, clusterResourceName, false, true, &admin.DiskBackupSnapshotSchedule20240805{ ReferenceHourOfDay: conversion.Pointer(3), ReferenceMinuteOfHour: conversion.Pointer(45), RestoreWindowDays: conversion.Pointer(1), }) - copySettingsConfigWithZoneID = configCopySettings(terraformStr, projectID, clusterResourceName, false, false, &admin20240530.DiskBackupSnapshotSchedule{ + copySettingsConfigWithZoneID = configCopySettings(terraformStr, projectID, clusterResourceName, false, false, &admin.DiskBackupSnapshotSchedule20240805{ ReferenceHourOfDay: conversion.Pointer(3), ReferenceMinuteOfHour: conversion.Pointer(45), RestoreWindowDays: conversion.Pointer(1), diff --git a/internal/service/cloudbackupschedule/resource_cloud_backup_schedule_test.go b/internal/service/cloudbackupschedule/resource_cloud_backup_schedule_test.go index 9bc22321c3..f4cc2ee516 100644 --- a/internal/service/cloudbackupschedule/resource_cloud_backup_schedule_test.go +++ b/internal/service/cloudbackupschedule/resource_cloud_backup_schedule_test.go @@ -5,7 +5,7 @@ import ( "fmt" "testing" - admin20240530 "go.mongodb.org/atlas-sdk/v20240530005/admin" + "go.mongodb.org/atlas-sdk/v20250312007/admin" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/terraform" @@ -31,7 +31,7 @@ func 
TestAccBackupRSCloudBackupSchedule_basic(t *testing.T) { CheckDestroy: checkDestroy, Steps: []resource.TestStep{ { - Config: configNoPolicies(&clusterInfo, &admin20240530.DiskBackupSnapshotSchedule{ + Config: configNoPolicies(&clusterInfo, &admin.DiskBackupSnapshotSchedule20240805{ ReferenceHourOfDay: conversion.Pointer(3), ReferenceMinuteOfHour: conversion.Pointer(45), RestoreWindowDays: conversion.Pointer(4), @@ -59,7 +59,7 @@ func TestAccBackupRSCloudBackupSchedule_basic(t *testing.T) { ), }, { - Config: configNewPolicies(&clusterInfo, &admin20240530.DiskBackupSnapshotSchedule{ + Config: configNewPolicies(&clusterInfo, &admin.DiskBackupSnapshotSchedule20240805{ ReferenceHourOfDay: conversion.Pointer(0), ReferenceMinuteOfHour: conversion.Pointer(0), RestoreWindowDays: conversion.Pointer(7), @@ -102,7 +102,7 @@ func TestAccBackupRSCloudBackupSchedule_basic(t *testing.T) { ), }, { - Config: configAdvancedPolicies(&clusterInfo, &admin20240530.DiskBackupSnapshotSchedule{ + Config: configAdvancedPolicies(&clusterInfo, &admin.DiskBackupSnapshotSchedule20240805{ ReferenceHourOfDay: conversion.Pointer(0), ReferenceMinuteOfHour: conversion.Pointer(0), RestoreWindowDays: conversion.Pointer(7), @@ -203,7 +203,7 @@ func TestAccBackupRSCloudBackupSchedule_onePolicy(t *testing.T) { CheckDestroy: checkDestroy, Steps: []resource.TestStep{ { - Config: configDefault(&clusterInfo, &admin20240530.DiskBackupSnapshotSchedule{ + Config: configDefault(&clusterInfo, &admin.DiskBackupSnapshotSchedule20240805{ ReferenceHourOfDay: conversion.Pointer(3), ReferenceMinuteOfHour: conversion.Pointer(45), RestoreWindowDays: conversion.Pointer(4), @@ -237,7 +237,7 @@ func TestAccBackupRSCloudBackupSchedule_onePolicy(t *testing.T) { ), }, { - Config: configOnePolicy(&clusterInfo, &admin20240530.DiskBackupSnapshotSchedule{ + Config: configOnePolicy(&clusterInfo, &admin.DiskBackupSnapshotSchedule20240805{ ReferenceHourOfDay: conversion.Pointer(0), ReferenceMinuteOfHour: conversion.Pointer(0), RestoreWindowDays: conversion.Pointer(7), @@ -328,7 +328,7 @@ func TestAccBackupRSCloudBackupSchedule_copySettings_zoneId(t *testing.T) { CheckDestroy: checkDestroy, Steps: []resource.TestStep{ { - Config: configCopySettings(terraformStr, projectID, clusterResourceName, false, false, &admin20240530.DiskBackupSnapshotSchedule{ + Config: configCopySettings(terraformStr, projectID, clusterResourceName, false, false, &admin.DiskBackupSnapshotSchedule20240805{ ReferenceHourOfDay: conversion.Pointer(3), ReferenceMinuteOfHour: conversion.Pointer(45), RestoreWindowDays: conversion.Pointer(1), @@ -336,7 +336,7 @@ func TestAccBackupRSCloudBackupSchedule_copySettings_zoneId(t *testing.T) { Check: resource.ComposeAggregateTestCheckFunc(checksCreateAll...), }, { - Config: configCopySettings(terraformStr, projectID, clusterResourceName, true, false, &admin20240530.DiskBackupSnapshotSchedule{ + Config: configCopySettings(terraformStr, projectID, clusterResourceName, true, false, &admin.DiskBackupSnapshotSchedule20240805{ ReferenceHourOfDay: conversion.Pointer(3), ReferenceMinuteOfHour: conversion.Pointer(45), RestoreWindowDays: conversion.Pointer(1), @@ -358,7 +358,7 @@ func TestAccBackupRSCloudBackupScheduleImport_basic(t *testing.T) { CheckDestroy: checkDestroy, Steps: []resource.TestStep{ { - Config: configDefault(&clusterInfo, &admin20240530.DiskBackupSnapshotSchedule{ + Config: configDefault(&clusterInfo, &admin.DiskBackupSnapshotSchedule20240805{ ReferenceHourOfDay: conversion.Pointer(3), ReferenceMinuteOfHour: conversion.Pointer(45), 
RestoreWindowDays: conversion.Pointer(4), @@ -413,7 +413,7 @@ func TestAccBackupRSCloudBackupSchedule_azure(t *testing.T) { CheckDestroy: checkDestroy, Steps: []resource.TestStep{ { - Config: configAzure(&clusterInfo, &admin20240530.DiskBackupApiPolicyItem{ + Config: configAzure(&clusterInfo, &admin.BackupComplianceOnDemandPolicyItem{ FrequencyInterval: 1, RetentionUnit: "days", RetentionValue: 1, @@ -426,7 +426,7 @@ func TestAccBackupRSCloudBackupSchedule_azure(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "policy_item_hourly.0.retention_value", "1")), }, { - Config: configAzure(&clusterInfo, &admin20240530.DiskBackupApiPolicyItem{ + Config: configAzure(&clusterInfo, &admin.BackupComplianceOnDemandPolicyItem{ FrequencyInterval: 2, RetentionUnit: "days", RetentionValue: 3, @@ -492,7 +492,7 @@ func checkDestroy(s *terraform.State) error { return nil } -func configNoPolicies(info *acc.ClusterInfo, p *admin20240530.DiskBackupSnapshotSchedule) string { +func configNoPolicies(info *acc.ClusterInfo, p *admin.DiskBackupSnapshotSchedule20240805) string { return info.TerraformStr + fmt.Sprintf(` resource "mongodbatlas_cloud_backup_schedule" "schedule_test" { cluster_name = %[1]s @@ -510,7 +510,7 @@ func configNoPolicies(info *acc.ClusterInfo, p *admin20240530.DiskBackupSnapshot `, info.TerraformNameRef, info.ProjectID, p.GetReferenceHourOfDay(), p.GetReferenceMinuteOfHour(), p.GetRestoreWindowDays()) } -func configDefault(info *acc.ClusterInfo, p *admin20240530.DiskBackupSnapshotSchedule) string { +func configDefault(info *acc.ClusterInfo, p *admin.DiskBackupSnapshotSchedule20240805) string { return info.TerraformStr + fmt.Sprintf(` resource "mongodbatlas_cloud_backup_schedule" "schedule_test" { cluster_name = %[1]s @@ -554,7 +554,7 @@ func configDefault(info *acc.ClusterInfo, p *admin20240530.DiskBackupSnapshotSch `, info.TerraformNameRef, info.ProjectID, p.GetReferenceHourOfDay(), p.GetReferenceMinuteOfHour(), p.GetRestoreWindowDays()) } -func configCopySettings(terraformStr, projectID, clusterResourceName string, emptyCopySettings, useRepSpecID bool, p *admin20240530.DiskBackupSnapshotSchedule) string { +func configCopySettings(terraformStr, projectID, clusterResourceName string, emptyCopySettings, useRepSpecID bool, p *admin.DiskBackupSnapshotSchedule20240805) string { var copySettings string var dataSourceConfig string @@ -641,7 +641,7 @@ func configCopySettings(terraformStr, projectID, clusterResourceName string, emp `, terraformStr, projectID, clusterResourceName, p.GetReferenceHourOfDay(), p.GetReferenceMinuteOfHour(), p.GetRestoreWindowDays(), copySettings, dataSourceConfig) } -func configOnePolicy(info *acc.ClusterInfo, p *admin20240530.DiskBackupSnapshotSchedule) string { +func configOnePolicy(info *acc.ClusterInfo, p *admin.DiskBackupSnapshotSchedule20240805) string { return info.TerraformStr + fmt.Sprintf(` resource "mongodbatlas_cloud_backup_schedule" "schedule_test" { cluster_name = %[1]s @@ -660,7 +660,7 @@ func configOnePolicy(info *acc.ClusterInfo, p *admin20240530.DiskBackupSnapshotS `, info.TerraformNameRef, info.ProjectID, p.GetReferenceHourOfDay(), p.GetReferenceMinuteOfHour(), p.GetRestoreWindowDays()) } -func configNewPolicies(info *acc.ClusterInfo, p *admin20240530.DiskBackupSnapshotSchedule, useYearly bool) string { +func configNewPolicies(info *acc.ClusterInfo, p *admin.DiskBackupSnapshotSchedule20240805, useYearly bool) string { var strYearly string if useYearly { strYearly = ` @@ -711,7 +711,7 @@ func configNewPolicies(info *acc.ClusterInfo, p 
*admin20240530.DiskBackupSnapsho `, info.TerraformNameRef, info.ProjectID, p.GetReferenceHourOfDay(), p.GetReferenceMinuteOfHour(), p.GetRestoreWindowDays(), strYearly) } -func configAzure(info *acc.ClusterInfo, policy *admin20240530.DiskBackupApiPolicyItem) string { +func configAzure(info *acc.ClusterInfo, policy *admin.BackupComplianceOnDemandPolicyItem) string { return info.TerraformStr + fmt.Sprintf(` resource "mongodbatlas_cloud_backup_schedule" "schedule_test" { cluster_name = %[1]s @@ -731,7 +731,7 @@ func configAzure(info *acc.ClusterInfo, policy *admin20240530.DiskBackupApiPolic `, info.TerraformNameRef, info.ProjectID, policy.GetFrequencyInterval(), policy.GetRetentionUnit(), policy.GetRetentionValue()) } -func configAdvancedPolicies(info *acc.ClusterInfo, p *admin20240530.DiskBackupSnapshotSchedule) string { +func configAdvancedPolicies(info *acc.ClusterInfo, p *admin.DiskBackupSnapshotSchedule20240805) string { return info.TerraformStr + fmt.Sprintf(` resource "mongodbatlas_cloud_backup_schedule" "schedule_test" { cluster_name = %[1]s diff --git a/internal/testutil/acc/independent_shard_scaling.go b/internal/testutil/acc/independent_shard_scaling.go index 7dc3f1e544..9de04da658 100644 --- a/internal/testutil/acc/independent_shard_scaling.go +++ b/internal/testutil/acc/independent_shard_scaling.go @@ -7,11 +7,12 @@ import ( "os" "github.com/mongodb-forks/digest" + "github.com/mongodb/terraform-provider-mongodbatlas/internal/config" ) func GetIndependentShardScalingMode(ctx context.Context, projectID, clusterName string) (*string, *http.Response, error) { - baseURL := os.Getenv("MONGODB_ATLAS_BASE_URL") - req, err := http.NewRequestWithContext(ctx, http.MethodGet, baseURL+"test/utils/auth/groups/"+projectID+"/clusters/"+clusterName+"/independentShardScalingMode", http.NoBody) + baseURL := config.NormalizeBaseURL(os.Getenv("MONGODB_ATLAS_BASE_URL")) + req, err := http.NewRequestWithContext(ctx, http.MethodGet, baseURL+"/test/utils/auth/groups/"+projectID+"/clusters/"+clusterName+"/independentShardScalingMode", http.NoBody) if err != nil { return nil, nil, err } From f46bf9f951477fcd240969bfd099f751c6902437 Mon Sep 17 00:00:00 2001 From: Leo Antoli <430982+lantoli@users.noreply.github.com> Date: Thu, 9 Oct 2025 14:30:14 +0200 Subject: [PATCH 8/8] chore: Fix some tests in PAK and SA executions (#3765) --- .github/workflows/acceptance-tests-runner.yml | 6 +- internal/config/client.go | 2 +- internal/provider/provider.go | 26 ++--- internal/provider/provider_sdk2.go | 6 +- .../service/advancedcluster/resource_test.go | 103 +++++++++--------- 5 files changed, 69 insertions(+), 74 deletions(-) diff --git a/.github/workflows/acceptance-tests-runner.yml b/.github/workflows/acceptance-tests-runner.yml index 5ae3519aba..3f3bebcd08 100644 --- a/.github/workflows/acceptance-tests-runner.yml +++ b/.github/workflows/acceptance-tests-runner.yml @@ -1150,7 +1150,8 @@ jobs: resource_policy: needs: [ change-detection, get-provider-version ] - if: ${{ needs.change-detection.outputs.resource_policy == 'true' || inputs.test_group == 'resource_policy' }} + # Skip in SA as it uses a different org and credentials. 
+ if: ${{ inputs.use_sa == false && (needs.change-detection.outputs.resource_policy == 'true' || inputs.test_group == 'resource_policy') }} runs-on: ubuntu-latest permissions: {} steps: @@ -1170,8 +1171,7 @@ jobs: MONGODB_ATLAS_PUBLIC_KEY: ${{ secrets.mongodb_atlas_rp_public_key }} MONGODB_ATLAS_PRIVATE_KEY: ${{ secrets.mongodb_atlas_rp_private_key }} MONGODB_ATLAS_LAST_VERSION: ${{ needs.get-provider-version.outputs.provider_version }} - ACCTEST_PACKAGES: | - ./internal/service/resourcepolicy + ACCTEST_PACKAGES: ./internal/service/resourcepolicy run: make testacc search_deployment: diff --git a/internal/config/client.go b/internal/config/client.go index c6245ed1cb..006409789e 100644 --- a/internal/config/client.go +++ b/internal/config/client.go @@ -220,7 +220,7 @@ func (r *RealmClient) Get(ctx context.Context) (*realm.Client, error) { if r.realmBaseURL != "" { adminURL := r.realmBaseURL + "/api/admin/v3.0/" optsRealm = append(optsRealm, realm.SetBaseURL(adminURL)) - authConfig.AuthURL, _ = url.Parse(adminURL + "/auth/providers/mongodb-cloud/login") + authConfig.AuthURL, _ = url.Parse(adminURL + "auth/providers/mongodb-cloud/login") } token, err := authConfig.NewTokenFromCredentials(ctx, r.publicKey, r.privateKey) diff --git a/internal/provider/provider.go b/internal/provider/provider.go index 3801398d08..c4a8a17243 100644 --- a/internal/provider/provider.go +++ b/internal/provider/provider.go @@ -50,8 +50,6 @@ import ( ) const ( - govURL = "https://cloud.mongodbgov.com" - MongodbGovCloudDevURL = "https://cloud-dev.mongodbgov.com" ProviderConfigError = "error in configuring the provider." MissingAuthAttrError = "either AWS Secrets Manager, Service Accounts or Atlas Programmatic API Keys attributes must be set" ProviderMetaUserAgentExtra = "user_agent_extra" @@ -62,13 +60,6 @@ const ( ProviderMetaModuleVersionDesc = "The version of the module using the provider" ) -var ( - govAdditionalURLs = []string{ - "https://cloud-dev.mongodbgov.com", - "https://cloud-qa.mongodbgov.com", - } -) - type MongodbtlasProvider struct { } @@ -230,10 +221,7 @@ func getProviderVars(ctx context.Context, req provider.ConfigureRequest, resp *p if len(data.AssumeRole) > 0 { assumeRoleARN = data.AssumeRole[0].RoleARN.ValueString() } - baseURL := data.BaseURL.ValueString() - if data.IsMongodbGovCloud.ValueBool() && !slices.Contains(govAdditionalURLs, baseURL) { - baseURL = govURL - } + baseURL := applyGovBaseURLIfNeeded(data.BaseURL.ValueString(), data.IsMongodbGovCloud.ValueBool()) return &config.Vars{ AccessToken: data.AccessToken.ValueString(), ClientID: data.ClientID.ValueString(), @@ -252,6 +240,18 @@ func getProviderVars(ctx context.Context, req provider.ConfigureRequest, resp *p } } +func applyGovBaseURLIfNeeded(providerBaseURL string, providerIsMongodbGovCloud bool) string { + const govURL = "https://cloud.mongodbgov.com" + govAdditionalURLs := []string{ + "https://cloud-dev.mongodbgov.com", + "https://cloud-qa.mongodbgov.com", + } + if providerIsMongodbGovCloud && !slices.Contains(govAdditionalURLs, config.NormalizeBaseURL(providerBaseURL)) { + return govURL + } + return providerBaseURL +} + func (p *MongodbtlasProvider) DataSources(context.Context) []func() datasource.DataSource { dataSources := []func() datasource.DataSource{ project.DataSource, diff --git a/internal/provider/provider_sdk2.go b/internal/provider/provider_sdk2.go index 0259167d13..a28ce40bbf 100644 --- a/internal/provider/provider_sdk2.go +++ b/internal/provider/provider_sdk2.go @@ -3,7 +3,6 @@ package provider import ( "context" "fmt" - 
"slices" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" @@ -319,10 +318,7 @@ func getSDKv2ProviderVars(d *schema.ResourceData) *config.Vars { assumeRoleARN = assumeRole["role_arn"].(string) } } - baseURL := d.Get("base_url").(string) - if d.Get("is_mongodbgov_cloud").(bool) && !slices.Contains(govAdditionalURLs, baseURL) { - baseURL = govURL - } + baseURL := applyGovBaseURLIfNeeded(d.Get("base_url").(string), d.Get("is_mongodbgov_cloud").(bool)) return &config.Vars{ AccessToken: d.Get("access_token").(string), ClientID: d.Get("client_id").(string), diff --git a/internal/service/advancedcluster/resource_test.go b/internal/service/advancedcluster/resource_test.go index 2d01f463c8..3c4362c9ea 100644 --- a/internal/service/advancedcluster/resource_test.go +++ b/internal/service/advancedcluster/resource_test.go @@ -287,9 +287,7 @@ func TestAccClusterAdvancedCluster_pausedToUnpaused(t *testing.T) { func TestAccClusterAdvancedCluster_advancedConfig_oldMongoDBVersion(t *testing.T) { var ( projectID, clusterName = acc.ProjectIDExecutionWithCluster(t, 4) - processArgs = &admin.ClusterDescriptionProcessArgs20240805{ - ChangeStreamOptionsPreAndPostImagesExpireAfterSeconds: conversion.IntPtr(-1), // this will not be set in the TF configuration - DefaultMaxTimeMS: conversion.IntPtr(65), + processArgsCommon = &admin.ClusterDescriptionProcessArgs20240805{ DefaultWriteConcern: conversion.StringPtr("1"), JavascriptEnabled: conversion.Pointer(true), MinimumEnabledTlsProtocol: conversion.StringPtr("TLS1_2"), @@ -299,11 +297,13 @@ func TestAccClusterAdvancedCluster_advancedConfig_oldMongoDBVersion(t *testing.T SampleSizeBIConnector: conversion.Pointer(110), TransactionLifetimeLimitSeconds: conversion.Pointer[int64](300), } - processArgsCipherConfig = &admin.ClusterDescriptionProcessArgs20240805{ - TlsCipherConfigMode: conversion.StringPtr("CUSTOM"), - CustomOpensslCipherConfigTls12: &[]string{"TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256", "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384"}, - } ) + processArgs := *processArgsCommon + processArgs.DefaultMaxTimeMS = conversion.IntPtr(65) + + processArgsCipherConfig := *processArgsCommon + processArgsCipherConfig.TlsCipherConfigMode = conversion.StringPtr("CUSTOM") + processArgsCipherConfig.CustomOpensslCipherConfigTls12 = &[]string{"TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256", "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384"} resource.ParallelTest(t, resource.TestCase{ PreCheck: acc.PreCheckBasicSleep(t, nil, projectID, clusterName), @@ -311,12 +311,12 @@ func TestAccClusterAdvancedCluster_advancedConfig_oldMongoDBVersion(t *testing.T CheckDestroy: acc.CheckDestroyCluster, Steps: []resource.TestStep{ { - Config: configAdvanced(t, projectID, clusterName, "7.0", processArgs), + Config: configAdvanced(t, projectID, clusterName, "7.0", &processArgs), ExpectError: regexp.MustCompile(errDefaultMaxTimeMinVersion), }, { - Config: configAdvanced(t, projectID, clusterName, "7.0", processArgsCipherConfig), - Check: checkAdvanced(clusterName, "TLS1_2", processArgsCipherConfig), + Config: configAdvanced(t, projectID, clusterName, "7.0", &processArgsCipherConfig), + Check: checkAdvanced(clusterName, "TLS1_2", &processArgsCipherConfig), }, acc.TestStepImportCluster(resourceName), }, @@ -1108,7 +1108,7 @@ func TestAccAdvancedCluster_createTimeoutWithDeleteOnCreateReplicaset(t *testing Timeout: 60 * time.Second, IsDelete: true, }, "waiting for cluster to be deleted after cleanup in create timeout", diags) - time.Sleep(1 * time.Minute) // 
decrease the chance of `CONTAINER_WAITING_FOR_FAST_RECORD_CLEAN_UP`: "A transient error occurred. Please try again in a minute or use a different name" + time.Sleep(2 * time.Minute) // decrease the chance of `CONTAINER_WAITING_FOR_FAST_RECORD_CLEAN_UP`: "A transient error occurred. Please try again in a minute or use a different name" } ) resource.ParallelTest(t, *createCleanupTest(t, configCall, waitOnClusterDeleteDone, true)) @@ -1895,31 +1895,43 @@ func checkSingleProviderPaused(name string, paused bool) resource.TestCheckFunc func configAdvanced(t *testing.T, projectID, clusterName, mongoDBMajorVersion string, p *admin.ClusterDescriptionProcessArgs20240805) string { t.Helper() - changeStreamOptionsStr := "" - defaultMaxTimeStr := "" - tlsCipherConfigModeStr := "" - customOpensslCipherConfigTLS12Str := "" + advancedConfig := "" mongoDBMajorVersionStr := "" - - if p != nil { - if p.ChangeStreamOptionsPreAndPostImagesExpireAfterSeconds != nil && p.ChangeStreamOptionsPreAndPostImagesExpireAfterSeconds != conversion.IntPtr(-1) { - changeStreamOptionsStr = fmt.Sprintf(`change_stream_options_pre_and_post_images_expire_after_seconds = %[1]d`, *p.ChangeStreamOptionsPreAndPostImagesExpireAfterSeconds) - } - if p.DefaultMaxTimeMS != nil { - defaultMaxTimeStr = fmt.Sprintf(`default_max_time_ms = %[1]d`, *p.DefaultMaxTimeMS) - } - if p.TlsCipherConfigMode != nil { - tlsCipherConfigModeStr = fmt.Sprintf(`tls_cipher_config_mode = %[1]q`, *p.TlsCipherConfigMode) - if p.CustomOpensslCipherConfigTls12 != nil && len(*p.CustomOpensslCipherConfigTls12) > 0 { - customOpensslCipherConfigTLS12Str = fmt.Sprintf( - `custom_openssl_cipher_config_tls12 = [%s]`, - acc.JoinQuotedStrings(*p.CustomOpensslCipherConfigTls12), - ) - } + if mongoDBMajorVersion != "" { + mongoDBMajorVersionStr = fmt.Sprintf("mongo_db_major_version = %[1]q\n", mongoDBMajorVersion) + } + if p.JavascriptEnabled != nil { + advancedConfig += fmt.Sprintf("javascript_enabled = %[1]t\n", *p.JavascriptEnabled) + } + if p.NoTableScan != nil { + advancedConfig += fmt.Sprintf("no_table_scan = %[1]t\n", *p.NoTableScan) + } + if p.OplogSizeMB != nil { + advancedConfig += fmt.Sprintf("oplog_size_mb = %[1]d\n", *p.OplogSizeMB) + } + if p.SampleRefreshIntervalBIConnector != nil { + advancedConfig += fmt.Sprintf("sample_refresh_interval_bi_connector = %[1]d\n", *p.SampleRefreshIntervalBIConnector) + } + if p.SampleSizeBIConnector != nil { + advancedConfig += fmt.Sprintf("sample_size_bi_connector = %[1]d\n", *p.SampleSizeBIConnector) + } + if p.TransactionLifetimeLimitSeconds != nil { + advancedConfig += fmt.Sprintf("transaction_lifetime_limit_seconds = %[1]d\n", *p.TransactionLifetimeLimitSeconds) + } + if p.ChangeStreamOptionsPreAndPostImagesExpireAfterSeconds != nil && *p.ChangeStreamOptionsPreAndPostImagesExpireAfterSeconds != -1 { + advancedConfig += fmt.Sprintf("change_stream_options_pre_and_post_images_expire_after_seconds = %[1]d\n", *p.ChangeStreamOptionsPreAndPostImagesExpireAfterSeconds) + } + if p.DefaultMaxTimeMS != nil { + advancedConfig += fmt.Sprintf("default_max_time_ms = %[1]d\n", *p.DefaultMaxTimeMS) + } + if p.TlsCipherConfigMode != nil { + advancedConfig += fmt.Sprintf("tls_cipher_config_mode = %[1]q\n", *p.TlsCipherConfigMode) + if p.CustomOpensslCipherConfigTls12 != nil && len(*p.CustomOpensslCipherConfigTls12) > 0 { + advancedConfig += fmt.Sprintf("custom_openssl_cipher_config_tls12 = [%s]\n", acc.JoinQuotedStrings(*p.CustomOpensslCipherConfigTls12)) } } - if mongoDBMajorVersion != "" { - mongoDBMajorVersionStr = 
fmt.Sprintf(`mongo_db_major_version = %[1]q`, mongoDBMajorVersion) + if p.MinimumEnabledTlsProtocol != nil { + advancedConfig += fmt.Sprintf("minimum_enabled_tls_protocol = %[1]q\n", *p.MinimumEnabledTlsProtocol) } return fmt.Sprintf(` @@ -1927,8 +1939,7 @@ func configAdvanced(t *testing.T, projectID, clusterName, mongoDBMajorVersion st project_id = %[1]q name = %[2]q cluster_type = "REPLICASET" - %[12]s - + %[3]s replication_specs = [{ region_configs = [{ electable_specs = { @@ -1946,22 +1957,10 @@ func configAdvanced(t *testing.T, projectID, clusterName, mongoDBMajorVersion st }] advanced_configuration = { - javascript_enabled = %[3]t - minimum_enabled_tls_protocol = %[4]q - no_table_scan = %[5]t - oplog_size_mb = %[6]d - sample_size_bi_connector = %[7]d - sample_refresh_interval_bi_connector = %[8]d - transaction_lifetime_limit_seconds = %[9]d - %[10]s - %[11]s - %[13]s - %[14]s + %[4]s } } - `, projectID, clusterName, p.GetJavascriptEnabled(), p.GetMinimumEnabledTlsProtocol(), p.GetNoTableScan(), - p.GetOplogSizeMB(), p.GetSampleSizeBIConnector(), p.GetSampleRefreshIntervalBIConnector(), p.GetTransactionLifetimeLimitSeconds(), - changeStreamOptionsStr, defaultMaxTimeStr, mongoDBMajorVersionStr, tlsCipherConfigModeStr, customOpensslCipherConfigTLS12Str) + dataSourcesConfig + `, projectID, clusterName, mongoDBMajorVersionStr, advancedConfig) + dataSourcesConfig } func checkAdvanced(name, tls string, processArgs *admin.ClusterDescriptionProcessArgs20240805) resource.TestCheckFunc { @@ -2689,13 +2688,13 @@ func configPriority(t *testing.T, projectID, clusterName string, swapPriorities func configBiConnectorConfig(t *testing.T, projectID, name string, enabled bool) string { t.Helper() - additionalConfig := ` + advancedConfig := ` bi_connector_config = { enabled = false } ` if enabled { - additionalConfig = ` + advancedConfig = ` bi_connector_config = { enabled = true read_preference = "secondary" @@ -2727,7 +2726,7 @@ func configBiConnectorConfig(t *testing.T, projectID, name string, enabled bool) %[3]s } - `, projectID, name, additionalConfig) + dataSourcesConfig + `, projectID, name, advancedConfig) + dataSourcesConfig } func checkTenantBiConnectorConfig(projectID, name string, enabled bool) resource.TestCheckFunc {