# fix: Handles updates of mongodbatlas_global_cluster_config (#3060)
Changes from 11 commits: 6e179c0, fcc2ef2, 5c697be, 7cf1ee1, 948af3f, 1eb3538, 86de531, 8caeb9a, 878a30d, 3ba3c0f, 2ed8856, 146e7f2
**Changelog entry** (new file):

````diff
@@ -0,0 +1,3 @@
+```release-note:enhancement
+resource/mongodbatlas_global_cluster_config: Supports update operation
+```
````
**Resource implementation** (Go) — the `reflect` import is added for the deep comparison in `diffManagedNamespaces` below:

```diff
@@ -4,6 +4,7 @@ import (
 	"context"
 	"errors"
 	"fmt"
+	"reflect"
 	"strings"
 	"time"
```
`resourceUpdate` previously rejected all updates with a hard error; it now diffs and applies changes to `managed_namespaces` and `custom_zone_mappings`:

```diff
@@ -211,9 +212,38 @@ func resourceRead(ctx context.Context, d *schema.ResourceData, meta any) diag.Di
 }

 func resourceUpdate(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics {
-	return diag.Errorf("Updating a global cluster configuration resource is not allowed as it would " +
-		"leave the index and shard key on the related collection in an inconsistent state.\n" +
-		"Please read our official documentation for more information.")
+	connV2 := meta.(*config.MongoDBClient).AtlasV2
+	ids := conversion.DecodeStateID(d.Id())
+	projectID := ids["project_id"]
+	clusterName := ids["cluster_name"]
+
+	if d.HasChange("managed_namespaces") {
+		oldMN, newMN := d.GetChange("managed_namespaces")
+		oldList := oldMN.(*schema.Set).List()
+		newList := newMN.(*schema.Set).List()
+		if err := updateManagedNamespaces(ctx, connV2, projectID, clusterName, oldList, newList); err != nil {
+			return diag.FromErr(fmt.Errorf(errorGlobalClusterUpdate, clusterName, err))
+		}
+	}
+
+	if d.HasChange("custom_zone_mappings") {
+		oldZN, newZN := d.GetChange("custom_zone_mappings")
+		oldSet := oldZN.(*schema.Set)
+		newSet := newZN.(*schema.Set)
+		if err := updateCustomZoneMappings(ctx, connV2, projectID, clusterName, oldSet, newSet); err != nil {
+			return diag.FromErr(fmt.Errorf(errorGlobalClusterUpdate, clusterName, err))
+		}
+	}
+	return resourceRead(ctx, d, meta)
 }

+// convertInterfaceSlice is a helper function that converts []map[string]any into []any.
+func convertInterfaceSlice(input []map[string]any) []any {
+	var out []any
+	for _, v := range input {
+		out = append(out, v)
+	}
+	return out
+}
+
 func resourceDelete(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics {
```
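A quick aside on why `convertInterfaceSlice` is needed: Go performs no implicit conversion from `[]map[string]any` to `[]any`, so the slice has to be rebuilt element by element. A minimal, self-contained illustration of that language rule (variable names here are ours, not the provider's):

```go
package main

import "fmt"

func main() {
	maps := []map[string]any{{"db": "store"}, {"db": "logs"}}

	// var asAny []any = maps // does not compile: []map[string]any is not []any

	// Each element must be re-boxed into the interface type individually,
	// which is what convertInterfaceSlice does in the diff above.
	asAny := make([]any, 0, len(maps))
	for _, m := range maps {
		asAny = append(asAny, m)
	}
	fmt.Println(asAny) // [map[db:store] map[db:logs]]
}
```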
```diff
@@ -335,3 +365,103 @@ func newCustomZoneMappings(tfList []any) *[]admin.ZoneMapping {

 	return &apiObjects
 }
+
+func addManagedNamespaces(ctx context.Context, connV2 *admin.APIClient, add []any, projectID, clusterName string) error {
+	for _, m := range add {
+		mn := m.(map[string]any)
+
+		addManagedNamespace := &admin.ManagedNamespaces{
+			Collection:     mn["collection"].(string),
+			Db:             mn["db"].(string),
+			CustomShardKey: mn["custom_shard_key"].(string),
+		}
+		if isCustomShardKeyHashed, okCustomShard := mn["is_custom_shard_key_hashed"]; okCustomShard {
+			addManagedNamespace.IsCustomShardKeyHashed = conversion.Pointer[bool](isCustomShardKeyHashed.(bool))
+		}
+		if isShardKeyUnique, okShard := mn["is_shard_key_unique"]; okShard {
+			addManagedNamespace.IsShardKeyUnique = conversion.Pointer[bool](isShardKeyUnique.(bool))
+		}
+		_, _, err := connV2.GlobalClustersApi.CreateManagedNamespace(ctx, projectID, clusterName, addManagedNamespace).Execute()
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
```

Review discussion on the error handling in `addManagedNamespaces`:

> **Collaborator:** What happens in case this errors out for a particular namespace? Right now I think it would just fail right away and not complete the loop. Should we instead capture errors from all calls and fail once?
>
> **Author:** I think failing right away is okay, but I haven't thought through the alternative of completing the loop on a single failure. No strong opinion from me on this.
>
> **Collaborator:** I'm okay failing right away. The only thing to consider is that users might run follow-up updates in those cases; as long as that works as expected and doesn't run into any state inconsistencies, this should be fine.
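For reference, the "capture errors from all calls and fail once" alternative discussed above could look roughly like the sketch below. This is not what the PR implements; `applyAll` and its inputs are hypothetical and only illustrate the aggregation pattern with `errors.Join` (Go 1.20+):

```go
package main

import (
	"errors"
	"fmt"
)

// applyAll runs every operation even when some fail, then reports all
// failures at once. Hypothetical sketch; the provider fails fast instead.
func applyAll(ops []func() error) error {
	var errs []error
	for i, op := range ops {
		if err := op(); err != nil {
			errs = append(errs, fmt.Errorf("operation %d: %w", i, err))
		}
	}
	return errors.Join(errs...) // nil when no operation failed
}

func main() {
	err := applyAll([]func() error{
		func() error { return nil },
		func() error { return errors.New("duplicate namespace") },
		func() error { return errors.New("invalid shard key") },
	})
	fmt.Println(err)
	// operation 1: duplicate namespace
	// operation 2: invalid shard key
}
```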
The same hunk continues with the diffing helpers:

```diff
+
+// buildManagedNamespacesMap converts a list of managed_namespace entries into a map keyed by "collection:db".
+func buildManagedNamespacesMap(list []any) map[string]map[string]any {
+	namespacesMap := make(map[string]map[string]any)
+	for _, item := range list {
+		m := item.(map[string]any)
+		key := fmt.Sprintf("%s:%s", m["collection"].(string), m["db"].(string))
+		namespacesMap[key] = m
+	}
+	return namespacesMap
+}
+
+// diffManagedNamespaces calculates the difference between old and new managed_namespaces.
+// Returns slices of namespaces to add and remove; errors out on modifications.
+func diffManagedNamespaces(oldList, newList []any) (toAdd, toRemove []map[string]any, err error) {
+	oldMap := buildManagedNamespacesMap(oldList)
+	newMap := buildManagedNamespacesMap(newList)
+	for key, oldEntry := range oldMap {
+		if newEntry, exists := newMap[key]; exists {
+			// Modification is not allowed.
+			if !reflect.DeepEqual(oldEntry, newEntry) {
+				return nil, nil, fmt.Errorf("managed namespace for collection '%s' in db '%s' cannot be modified", oldEntry["collection"], oldEntry["db"])
+			}
+		} else {
+			toRemove = append(toRemove, oldEntry)
+		}
+	}
+	for key, newEntry := range newMap {
+		if _, exists := oldMap[key]; !exists {
+			toAdd = append(toAdd, newEntry)
+		}
+	}
+	return toAdd, toRemove, nil
+}
```
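To make the add/remove/modify semantics of `diffManagedNamespaces` concrete, here is a self-contained sketch of the same `collection:db`-keyed diff, using plain slices in place of Terraform's `schema.Set` data (the function name and sample values are ours):

```go
package main

import (
	"fmt"
	"reflect"
)

// diffNamespaces mirrors the logic above: entries only in oldList are
// removals, entries only in newList are additions, and an entry whose
// fields changed under the same collection:db key is rejected.
func diffNamespaces(oldList, newList []map[string]any) (toAdd, toRemove []map[string]any, err error) {
	key := func(m map[string]any) string { return fmt.Sprintf("%s:%s", m["collection"], m["db"]) }
	oldMap := map[string]map[string]any{}
	for _, m := range oldList {
		oldMap[key(m)] = m
	}
	newMap := map[string]map[string]any{}
	for _, m := range newList {
		newMap[key(m)] = m
	}
	for k, oldEntry := range oldMap {
		newEntry, exists := newMap[k]
		switch {
		case !exists:
			toRemove = append(toRemove, oldEntry)
		case !reflect.DeepEqual(oldEntry, newEntry):
			return nil, nil, fmt.Errorf("namespace %s cannot be modified", k)
		}
	}
	for k, newEntry := range newMap {
		if _, exists := oldMap[k]; !exists {
			toAdd = append(toAdd, newEntry)
		}
	}
	return toAdd, toRemove, nil
}

func main() {
	oldNS := []map[string]any{{"collection": "orders", "db": "store", "custom_shard_key": "region"}}
	newNS := []map[string]any{{"collection": "users", "db": "store", "custom_shard_key": "country"}}
	toAdd, toRemove, err := diffNamespaces(oldNS, newNS)
	fmt.Println(toAdd, toRemove, err)
	// users is added and orders removed; changing the shard key of an
	// existing collection:db pair would instead return an error.
}
```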
```diff
+
+// updateManagedNamespaces encapsulates diffing and applying removals/additions.
+func updateManagedNamespaces(ctx context.Context, connV2 *admin.APIClient, projectID, clusterName string, oldList, newList []any) error {
+	toAdd, toRemove, err := diffManagedNamespaces(oldList, newList)
+	if err != nil {
+		return err
+	}
+	if len(toRemove) > 0 {
+		if err := removeManagedNamespaces(ctx, connV2, convertInterfaceSlice(toRemove), projectID, clusterName); err != nil {
+			return err
+		}
+	}
+	if len(toAdd) > 0 {
+		if err := addManagedNamespaces(ctx, connV2, convertInterfaceSlice(toAdd), projectID, clusterName); err != nil {
+			return err
+		}
+	}
+	return nil
+}
```
```diff
+
+// updateCustomZoneMappings encapsulates diffing and applying changes for custom_zone_mappings.
+func updateCustomZoneMappings(ctx context.Context, connV2 *admin.APIClient, projectID, clusterName string, oldSet, newSet *schema.Set) error {
+	removed := oldSet.Difference(newSet).List()
+	added := newSet.Difference(oldSet).List()
+
+	if len(removed) > 0 {
+		// Allow deletion only if all mappings are removed.
+		if newSet.Len() != 0 {
+			return fmt.Errorf("partial deletion of custom_zone_mappings is not allowed; remove either all mappings or none")
+		}
+		if _, _, err := connV2.GlobalClustersApi.DeleteAllCustomZoneMappings(ctx, projectID, clusterName).Execute(); err != nil {
+			return err
+		}
+	}
+	if len(added) > 0 {
+		if _, _, err := connV2.GlobalClustersApi.CreateCustomZoneMapping(ctx, projectID, clusterName, &admin.CustomZoneMappings{
+			CustomZoneMappings: newCustomZoneMappings(added),
+		}).Execute(); err != nil {
+			return err
+		}
+	}
+	return nil
+}
```
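The all-or-nothing rule reflects the API surface used here: mappings are deleted only in bulk via `DeleteAllCustomZoneMappings`, so removing a subset cannot be expressed. A standalone sketch of the guard, modelling the set as a plain map keyed by location (the function and data are hypothetical, and a real `schema.Set` compares whole elements rather than keys):

```go
package main

import "fmt"

// checkZoneMappingRemoval rejects changes that remove some but not all
// custom zone mappings, mirroring the guard in updateCustomZoneMappings.
func checkZoneMappingRemoval(oldSet, newSet map[string]string) error {
	removedAny := false
	for location := range oldSet {
		if _, ok := newSet[location]; !ok {
			removedAny = true
			break
		}
	}
	if removedAny && len(newSet) != 0 {
		return fmt.Errorf("partial deletion of custom_zone_mappings is not allowed; remove either all mappings or none")
	}
	return nil
}

func main() {
	oldM := map[string]string{"US": "Zone 1", "DE": "Zone 2"}
	fmt.Println(checkZoneMappingRemoval(oldM, map[string]string{"US": "Zone 1"})) // partial → error
	fmt.Println(checkZoneMappingRemoval(oldM, map[string]string{}))               // full removal → nil
	fmt.Println(checkZoneMappingRemoval(oldM, oldM))                              // no change → nil
}
```

Note that in the real code a mapping replaced in place shows up as one removal plus one addition, so it is likewise rejected while the desired set is non-empty.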
Review comment on the overall PR:

> I think it would be easier to review if this were done in two PRs: one with just the revert and no additional changes, and a follow-up PR with the new behavior, so we can spot the real changes after the revert more easily.