Skip to content
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
3 changes: 1 addition & 2 deletions .github/workflows/code-health.yml
Original file line number Diff line number Diff line change
Expand Up @@ -341,6 +341,7 @@ jobs:
CONFIG_PATH: ${{ steps.config-path.outputs.CONFIG_PATH }}
CONFIG_CONTENT: |
skip_update_check = true
telemetry_enabled = false

[__e2e]
org_id = 'a0123456789abcdef012345a'
Expand All @@ -349,7 +350,6 @@ jobs:
private_api_key = '12345678-abcd-ef01-2345-6789abcdef01'
ops_manager_url = 'http://localhost:8080/'
service = 'cloud'
telemetry_enabled = false
output = 'plaintext'

[__e2e_snapshot]
Expand All @@ -359,7 +359,6 @@ jobs:
private_api_key = '12345678-abcd-ef01-2345-6789abcdef01'
ops_manager_url = 'http://localhost:8080/'
service = 'cloud'
telemetry_enabled = false
output = 'plaintext'
run: |
echo "$CONFIG_CONTENT" > "$CONFIG_PATH"
Expand Down
112 changes: 60 additions & 52 deletions .github/workflows/update-e2e-tests.yml
Original file line number Diff line number Diff line change
Expand Up @@ -14,53 +14,53 @@ jobs:
fail-fast: false
matrix:
package:
- atlas/autogeneration
- atlas/backup/compliancepolicy
- atlas/backup/flex
- atlas/backup/exports/buckets
- atlas/backup/exports/jobs
- atlas/backup/restores
- atlas/backup/schedule
- atlas/backup/snapshot
- atlas/clusters/file
- atlas/clusters/flags
- atlas/clusters/flex
- atlas/clusters/m0
- atlas/clusters/sharded
- atlas/clusters/upgrade
- atlas/datafederation/db
- atlas/datafederation/privatenetwork
- atlas/datafederation/querylimits
- atlas/decrypt
- atlas/deployments/atlasclusters
- atlas/deployments/local/auth/deprecated
- atlas/deployments/local/auth/new
- atlas/deployments/local/nocli
- atlas/deployments/local/noauth
- atlas/generic
- atlas/interactive
# - atlas/autogeneration
# - atlas/backup/compliancepolicy
# - atlas/backup/flex
# - atlas/backup/exports/buckets
# - atlas/backup/exports/jobs
# - atlas/backup/restores
# - atlas/backup/schedule
# - atlas/backup/snapshot
# - atlas/clusters/file
# - atlas/clusters/flags
# - atlas/clusters/flex
# - atlas/clusters/m0
# - atlas/clusters/sharded
# - atlas/clusters/upgrade
# - atlas/datafederation/db
# - atlas/datafederation/privatenetwork
# - atlas/datafederation/querylimits
# - atlas/decrypt
# - atlas/deployments/atlasclusters
# - atlas/deployments/local/auth/deprecated
# - atlas/deployments/local/auth/new
# - atlas/deployments/local/nocli
# - atlas/deployments/local/noauth
# - atlas/generic
# - atlas/interactive
- atlas/ldap
- atlas/livemigrations
- atlas/logs
- atlas/metrics
- atlas/networking
- atlas/onlinearchive
- atlas/performanceAdvisor
- atlas/plugin/install
- atlas/plugin/run
- atlas/plugin/uninstall
- atlas/plugin/update
- atlas/processes
# - atlas/livemigrations
# - atlas/logs
# - atlas/metrics
# - atlas/networking
# - atlas/onlinearchive
# - atlas/performanceAdvisor
# - atlas/plugin/install
# - atlas/plugin/run
# - atlas/plugin/uninstall
# - atlas/plugin/update
# - atlas/processes
- atlas/search
- atlas/search_nodes
- atlas/serverless/instance
- atlas/streams
- atlas/streams_with_cluster
# - atlas/search_nodes
# - atlas/serverless/instance
# - atlas/streams
# - atlas/streams_with_cluster
- atlas/clusters/iss
- atlas/iam
- brew
- config
- kubernetes
# - brew
# - config
# - kubernetes
steps:
- uses: GitHubSecurityLab/actions-permissions/monitor@v1
with:
Expand All @@ -84,6 +84,7 @@ jobs:
CONFIG_PATH: ${{ steps.config-path.outputs.CONFIG_PATH }}
CONFIG_CONTENT: |
skip_update_check = true
telemetry_enabled = false

[__e2e]
org_id = 'a0123456789abcdef012345a'
Expand All @@ -92,7 +93,6 @@ jobs:
private_api_key = '12345678-abcd-ef01-2345-6789abcdef01'
ops_manager_url = 'http://localhost:8080/'
service = 'cloud'
telemetry_enabled = false
output = 'plaintext'

[__e2e_snapshot]
Expand All @@ -102,7 +102,6 @@ jobs:
private_api_key = '12345678-abcd-ef01-2345-6789abcdef01'
ops_manager_url = 'http://localhost:8080/'
service = 'cloud'
telemetry_enabled = false
output = 'plaintext'
MONGODB_ATLAS_ORG_ID: ${{ secrets.MONGODB_ATLAS_ORG_ID }}
MONGODB_ATLAS_PROJECT_ID: ${{ secrets.MONGODB_ATLAS_PROJECT_ID }}
Expand Down Expand Up @@ -175,15 +174,15 @@ jobs:
CONFIG_PATH: ${{ steps.config-path.outputs.CONFIG_PATH }}
CONFIG_CONTENT: |
skip_update_check = true
telemetry_enabled = false

[__e2e]
org_id = '${{ secrets.MONGODB_ATLAS_ORG_ID }}'
project_id = '${{ secrets.MONGODB_ATLAS_PROJECT_ID }}'
public_api_key = '${{ secrets.MONGODB_ATLAS_PROJECT_ID }}'
private_api_key = '${{ secrets.MONGODB_ATLAS_PRIVATE_API_KEY }}'
ops_manager_url = '${{ secrets.MONGODB_ATLAS_OPS_MANAGER_URL }}'
org_id = 'a0123456789abcdef012345a'
project_id = 'b0123456789abcdef012345b'
public_api_key = 'ABCDEF01'
private_api_key = '12345678-abcd-ef01-2345-6789abcdef01'
ops_manager_url = 'http://localhost:8080/'
service = 'cloud'
telemetry_enabled = false
output = 'plaintext'

[__e2e_snapshot]
Expand All @@ -193,10 +192,19 @@ jobs:
private_api_key = '12345678-abcd-ef01-2345-6789abcdef01'
ops_manager_url = 'http://localhost:8080/'
service = 'cloud'
telemetry_enabled = false
output = 'plaintext'
MONGODB_ATLAS_ORG_ID: ${{ secrets.MONGODB_ATLAS_ORG_ID }}
MONGODB_ATLAS_PROJECT_ID: ${{ secrets.MONGODB_ATLAS_PROJECT_ID }}
MONGODB_ATLAS_PRIVATE_API_KEY: ${{ secrets.MONGODB_ATLAS_PRIVATE_API_KEY }}
MONGODB_ATLAS_PUBLIC_API_KEY: ${{ secrets.MONGODB_ATLAS_PUBLIC_API_KEY }}
MONGODB_ATLAS_OPS_MANAGER_URL: ${{ secrets.MONGODB_ATLAS_OPS_MANAGER_URL }}
run: |
echo "$CONFIG_CONTENT" > "$CONFIG_PATH"
./bin/atlas config set org_id "$MONGODB_ATLAS_ORG_ID" -P __e2e
./bin/atlas config set project_id "$MONGODB_ATLAS_PROJECT_ID" -P __e2e
./bin/atlas config set public_api_key "$MONGODB_ATLAS_PUBLIC_API_KEY" -P __e2e
./bin/atlas config set private_api_key "$MONGODB_ATLAS_PRIVATE_API_KEY" -P __e2e
./bin/atlas config set ops_manager_url "$MONGODB_ATLAS_OPS_MANAGER_URL" -P __e2e
- run: make e2e-test
env:
TEST_CMD: gotestsum --junitfile e2e-tests.xml --format standard-verbose --
Expand Down
2 changes: 1 addition & 1 deletion scripts/add-e2e-profiles.sh
Original file line number Diff line number Diff line change
Expand Up @@ -18,7 +18,7 @@ set -euo pipefail
./bin/atlas config delete __e2e --force >/dev/null 2>&1 || true

# Prompt if user wants to use cloud-dev.mongodb.com
read -p "Do you want to set ops_manager_url to cloud-dev.mongodb.com? [Y/n] " -n 1 -r
read -p "Do you want to set ops_manager_url to cloud-dev.mongodb.com? [Y/n] " -n 1 -r
echo
if [[ $REPLY =~ ^[Yy]$ ]]; then
ops_manager_url="https://cloud-dev.mongodb.com/"
Expand Down
3 changes: 3 additions & 0 deletions test/internal/cleanup_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -76,16 +76,19 @@ func TestCleanup(t *testing.T) {
if orgID, set := os.LookupEnv("MONGODB_ATLAS_ORG_ID"); set {
args = append(args, "--orgId", orgID)
}

cmd := exec.Command(cliPath, args...)
cmd.Env = os.Environ()
resp, err := RunAndGetStdOut(cmd)
req.NoError(err, string(resp))
var projects admin.PaginatedAtlasGroup
req.NoError(json.Unmarshal(resp, &projects), string(resp))
t.Logf("projects:\n%s\n", resp)

for _, project := range projects.GetResults() {
projectID := project.GetId()
if projectID == os.Getenv("MONGODB_ATLAS_PROJECT_ID") {
t.Log("skip deleting default project", projectID)
Copy link
Collaborator Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

from the logs, we're not detecting this env variable and not skipping the project successfully

// we have to clean up data federations from default project
// as this is the only project configured for data federation
// (has a configured awsRoleId)
Expand Down
81 changes: 68 additions & 13 deletions test/internal/helper.go
Original file line number Diff line number Diff line change
Expand Up @@ -118,8 +118,9 @@

deletingState = "DELETING"

maxRetryAttempts = 10
sleepTimeInSeconds = 30
maxRetryAttempts = 10
sleepTimeInSeconds = 30
clusterDeletionTimeout = 30 * time.Minute

// CLI Plugins System constants.
examplePluginRepository = "mongodb/atlas-cli-plugin-example"
Expand Down Expand Up @@ -233,7 +234,32 @@
return nil
}

func deleteServerlessInstanceForProject(t *testing.T, cliPath, projectID, clusterName string) {
func removeTerminationProtectionFromServerlessInstance(projectID, serverlessInstanceName string) error {

Check failure on line 237 in test/internal/helper.go

View workflow job for this annotation

GitHub Actions / lint

func removeTerminationProtectionFromServerlessInstance is unused (unused)
cliPath, err := AtlasCLIBin()
if err != nil {
return err
}
args := []string{
serverlessEntity,
"update",
serverlessInstanceName,
"--disableTerminationProtection",
"-P",
ProfileName(),
}
if projectID != "" {
args = append(args, "--projectId", projectID)
}
updateCmd := exec.Command(cliPath, args...)
updateCmd.Env = os.Environ()
if resp, err := RunAndGetStdOut(updateCmd); err != nil {
return fmt.Errorf("error updating serverless instance %w: %s", err, string(resp))
}

return watchServerlessInstanceForProject(projectID, serverlessInstanceName)
}

func deleteServerlessInstanceForProject(t *testing.T, cliPath, projectID, clusterName string) error {
t.Helper()

args := []string{
Expand All @@ -249,10 +275,13 @@
}
deleteCmd := exec.Command(cliPath, args...)
deleteCmd.Env = os.Environ()
resp, err := RunAndGetStdOut(deleteCmd)
require.NoError(t, err, string(resp))
if resp, err := RunAndGetStdOut(deleteCmd); err != nil {
if !strings.Contains(err.Error(), "CANNOT_TERMINATE_SERVERLESS_INSTANCE_WHEN_TERMINATION_PROTECTION_ENABLED") {
return fmt.Errorf("error deleting serverless instance %w: %s", err, string(resp))
}
}

_ = watchServerlessInstanceForProject(projectID, clusterName)
return watchServerlessInstanceForProject(projectID, clusterName)
}

func deployClusterForProject(projectID, clusterName, tier, mDBVersion string, enableBackup bool) (string, error) {
Expand Down Expand Up @@ -389,19 +418,45 @@

func DeleteClusterForProject(projectID, clusterName string) error {
if err := internalDeleteClusterForProject(projectID, clusterName); err != nil {
if !strings.Contains(err.Error(), "CANNOT_TERMINATE_CLUSTER_WHEN_TERMINATION_PROTECTION_ENABLED") {
return err
if strings.Contains(err.Error(), "CLUSTER_NOT_FOUND") || strings.Contains(err.Error(), "GROUP_NOT_FOUND") {
return nil
}

if err := removeTerminationProtectionFromCluster(projectID, clusterName); err != nil {
return err
if strings.Contains(err.Error(), "CANNOT_TERMINATE_CLUSTER_WHEN_TERMINATION_PROTECTION_ENABLED") {
if err := removeTerminationProtectionFromCluster(projectID, clusterName); err != nil {
return err
}
return internalDeleteClusterForProject(projectID, clusterName)
}
return internalDeleteClusterForProject(projectID, clusterName)

return err
}

return nil
}

// DeleteClusterForProjectWithRetry retries the deletion of a cluster for a
// project if the error CLUSTER_ALREADY_REQUESTED_DELETION is encountered.
// Retries back off exponentially (1s, 2s, 4s, ...) for up to
// maxRetryAttempts attempts; any other error aborts immediately.
func DeleteClusterForProjectWithRetry(t *testing.T, projectID, clusterName string) error {
	t.Helper()

	delay := 1
	for try := 1; try <= maxRetryAttempts; try++ {
		err := DeleteClusterForProject(projectID, clusterName)
		switch {
		case err == nil:
			return nil
		case strings.Contains(err.Error(), "CLUSTER_ALREADY_REQUESTED_DELETION"):
			// A previous delete is still in flight — wait and try again.
			t.Logf("%d/%d attempts - cluster %q already requested deletion, retrying in %d seconds...", try, maxRetryAttempts, clusterName, delay)
			time.Sleep(time.Duration(delay) * time.Second)
			delay *= 2
		default:
			return fmt.Errorf("unexpected error while deleting cluster %q: %w", clusterName, err)
		}
	}

	return fmt.Errorf("failed to delete cluster %q after %d attempts", clusterName, maxRetryAttempts)
}

func deleteDatalakeForProject(cliPath, projectID, id string) error {
args := []string{
datalakePipelineEntity,
Expand Down Expand Up @@ -746,7 +801,7 @@
_ = WatchCluster(projectID, clusterName)
return
}
assert.NoError(t, DeleteClusterForProject(projectID, clusterName))
assert.NoError(t, DeleteClusterForProjectWithRetry(t, projectID, clusterName))
})
}(cluster.GetName(), cluster.GetStateName())
}
Expand Down Expand Up @@ -1056,7 +1111,7 @@
_ = watchServerlessInstanceForProject(projectID, serverlessInstance)
return
}
deleteServerlessInstanceForProject(t, cliPath, projectID, serverlessInstance)
require.NoError(t, deleteServerlessInstanceForProject(t, cliPath, projectID, serverlessInstance))
})
}(serverless.GetName(), serverless.GetStateName())
}
Expand Down
Loading