
INTMDB-710: Serverless Instance wants to do an in-place update on every run #1152


Merged: 8 commits, May 3, 2023
@@ -115,7 +115,7 @@ func integrationToSchema(d *schema.ResourceData, integration *matlas.ThirdPartyI
"org_name": integration.OrgName,
"url": integrationSchema.URL,
"secret": integrationSchema.Secret,
"microsoft_teams_webhook_url": integrationSchema.MicrosoftTeamsWebhookURL,
"microsoft_teams_webhook_url": integration.MicrosoftTeamsWebhookURL,
"user_name": integrationSchema.UserName,
"password": integrationSchema.Password,
"service_discovery": integration.ServiceDiscovery,
@@ -11,6 +11,7 @@ import (
)

func TestAccConfigDSThirdPartyIntegrations_basic(t *testing.T) {
+ SkipTest(t)
var (
projectID = os.Getenv("MONGODB_ATLAS_PROJECT_ID")
intgTypes = []string{"NEW_RELIC", "OPS_GENIE", "DATADOG", "VICTOR_OPS", "WEBHOOK", "PROMETHEUS"}
18 changes: 18 additions & 0 deletions mongodbatlas/resource_mongodbatlas_backup_compliance_policy.go
@@ -233,6 +233,8 @@ func resourceMongoDBAtlasBackupCompliancePolicyCreate(ctx context.Context, d *sc

backupCompliancePolicyReq.EncryptionAtRestEnabled = pointy.Bool(d.Get("encryption_at_rest_enabled").(bool))

+ backupCompliancePolicyReq.PitEnabled = pointy.Bool(d.Get("pit_enabled").(bool))

backupCompliancePolicyReq.RestoreWindowDays = pointy.Int64(cast.ToInt64(d.Get("restore_window_days")))

backupCompliancePolicyReq.OnDemandPolicyItem = *expandDemandBackupPolicyItem(d)
@@ -333,6 +335,18 @@ func resourceMongoDBAtlasBackupCompliancePolicyRead(ctx context.Context, d *sche
return diag.FromErr(fmt.Errorf(errorBackupPolicySetting, "state", projectID, err))
}

if err := d.Set("copy_protection_enabled", backupPolicy.CopyProtectionEnabled); err != nil {
return diag.FromErr(fmt.Errorf(errorBackupPolicySetting, "copy_protection_enabled", projectID, err))
}

if err := d.Set("encryption_at_rest_enabled", backupPolicy.EncryptionAtRestEnabled); err != nil {
return diag.FromErr(fmt.Errorf(errorBackupPolicySetting, "encryption_at_rest_enabled", projectID, err))
}

if err := d.Set("pit_enabled", backupPolicy.PitEnabled); err != nil {
return diag.FromErr(fmt.Errorf(errorBackupPolicySetting, "pit_enabled", projectID, err))
}

if err := d.Set("on_demand_policy_item", flattenOnDemandBackupPolicyItem(backupPolicy.OnDemandPolicyItem)); err != nil {
return diag.FromErr(fmt.Errorf(errorBackupPolicySetting, "scheduled_policy_items", projectID, err))
}
@@ -384,6 +398,10 @@ func resourceMongoDBAtlasBackupCompliancePolicyUpdate(ctx context.Context, d *sc
backupCompliancePolicyUpdate.CopyProtectionEnabled = pointy.Bool(d.Get("copy_protection_enabled").(bool))
}

if d.HasChange("pit_enabled") {
backupCompliancePolicyUpdate.PitEnabled = pointy.Bool(d.Get("pit_enabled").(bool))
}

if d.HasChange("restore_window_days") {
backupCompliancePolicyUpdate.RestoreWindowDays = pointy.Int64(cast.ToInt64(d.Get("restore_window_days")))
}
8 changes: 4 additions & 4 deletions mongodbatlas/resource_mongodbatlas_serverless_instance.go
@@ -259,11 +259,11 @@ func resourceMongoDBAtlasServerlessInstanceRead(ctx context.Context, d *schema.R
if err := d.Set("connection_strings_standard_srv", serverlessInstance.ConnectionStrings.StandardSrv); err != nil {
return diag.Errorf("error setting `connection_strings_standard_srv` for serverless instance (%s): %s", d.Id(), err)
}
- if len(serverlessInstance.ConnectionStrings.PrivateEndpoint) > 0 {
Collaborator (author) comment: this `if` was the bug. We want to set `connection_strings_private_endpoint_srv` even when `ConnectionStrings.PrivateEndpoint` is empty, so the state stores an empty list instead of omitting the attribute and triggering an in-place update on every run.

if err := d.Set("connection_strings_private_endpoint_srv", flattenSRVConnectionString(serverlessInstance.ConnectionStrings.PrivateEndpoint)); err != nil {
return diag.Errorf("error setting `connection_strings_private_endpoint_srv` for serverless instance (%s): %s", d.Id(), err)
}

if err := d.Set("connection_strings_private_endpoint_srv", flattenSRVConnectionString(serverlessInstance.ConnectionStrings.PrivateEndpoint)); err != nil {
return diag.Errorf("error setting `connection_strings_private_endpoint_srv` for serverless instance (%s): %s", d.Id(), err)
}

if err := d.Set("create_date", serverlessInstance.CreateDate); err != nil {
return diag.Errorf("error setting `create_date` for serverless instance (%s): %s", d.Id(), err)
}
17 changes: 16 additions & 1 deletion mongodbatlas/resource_mongodbatlas_serverless_instance_test.go
@@ -31,6 +31,7 @@ func TestAccClusterRSServerlessInstance_basic(t *testing.T) {
resource.TestCheckResourceAttr(resourceName, "project_id", projectID),
resource.TestCheckResourceAttr(resourceName, "name", instanceName),
resource.TestCheckResourceAttr(resourceName, "termination_protection_enabled", "false"),
+ testAccCheckConnectionStringPrivateEndpointIsPresentWithNoElement(resourceName),
),
},
},
@@ -114,11 +115,25 @@ func testAccCheckMongoDBAtlasServerlessInstanceImportStateIDFunc(resourceName st
}

ids := decodeStateID(rs.Primary.ID)

return fmt.Sprintf("%s-%s", ids["project_id"], ids["name"]), nil
}
}

+ func testAccCheckConnectionStringPrivateEndpointIsPresentWithNoElement(resourceName string) resource.TestCheckFunc {
+ return func(s *terraform.State) error {
+ rs, ok := s.RootModule().Resources[resourceName]
+ if !ok {
+ return fmt.Errorf("not found: %s", resourceName)
+ }
+
+ if connectionStringPrivateEndpoint := rs.Primary.Attributes["connection_strings_private_endpoint_srv.#"]; connectionStringPrivateEndpoint == "" {
+ return fmt.Errorf("expected connection_strings_private_endpoint_srv to be present")
+ }
+
+ return nil
+ }
+ }

func testAccMongoDBAtlasServerlessInstanceConfig(projectID, name string, ignoreConnectionStrings bool) string {
lifecycle := ""

13 changes: 12 additions & 1 deletion website/docs/r/advanced_cluster.html.markdown
@@ -370,10 +370,21 @@ replication_specs {
### auto_scaling

* `disk_gb_enabled` - (Optional) Flag that indicates whether this cluster enables disk auto-scaling. This parameter defaults to true.
+ - Set to `true` to enable disk auto-scaling.
+ - Set to `false` to disable disk auto-scaling.
+
+ ~> **IMPORTANT:** If `disk_gb_enabled` is true, Atlas automatically scales the disk size up and down.
+ As a result, the `disk_size_gb` value returned may differ from the value specified in the Terraform config, and if a plan is then applied without noting this, Terraform will scale the cluster disk size back to the original `disk_size_gb` value.
+ To prevent this, use a lifecycle customization, e.g.:
+ `lifecycle {
+ ignore_changes = [disk_size_gb]
+ }`
+ After adding the `lifecycle` block, to explicitly change `disk_size_gb`, comment out the `lifecycle` block and run `terraform apply`. Be sure to uncomment the `lifecycle` block once done to prevent accidental changes.
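For context, here is a minimal sketch of where the `lifecycle` block sits in a `mongodbatlas_advanced_cluster` resource; the project ID variable, region, and instance sizes are illustrative assumptions, not taken from this PR:

```hcl
resource "mongodbatlas_advanced_cluster" "example" {
  project_id   = var.project_id # assumed to be defined elsewhere
  name         = "example-cluster"
  cluster_type = "REPLICASET"

  replication_specs {
    region_configs {
      electable_specs {
        instance_size = "M10"
        node_count    = 3
      }
      auto_scaling {
        disk_gb_enabled = true # Atlas may now resize the disk out of band
      }
      provider_name = "AWS"
      priority      = 7
      region_name   = "EU_CENTRAL_1"
    }
  }

  lifecycle {
    # Ignore Atlas-driven disk growth so plans stay clean.
    ignore_changes = [disk_size_gb]
  }
}
```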

* `compute_enabled` - (Optional) Flag that indicates whether instance size auto-scaling is enabled. This parameter defaults to false.

~> **IMPORTANT:** If `compute_enabled` is true, then Atlas will automatically scale up to the maximum provided and down to the minimum, if provided.
- This will cause the value of `instance_size` returned to potential be different than what is specified in the Terraform config and if one then applies a plan, not noting this, Terraform will scale the cluster back down to the original `instance_size` value.
+ This will cause the value of `instance_size` returned to potentially be different than what is specified in the Terraform config and if one then applies a plan, not noting this, Terraform will scale the cluster back to the original `instance_size` value.
To prevent this a lifecycle customization should be used, i.e.:
`lifecycle {
ignore_changes = [instance_size]
}`
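The compute counterpart looks like the sketch below; the `auto_scaling` block sits inside `region_configs`, and the sizes are illustrative assumptions:

```hcl
auto_scaling {
  compute_enabled            = true
  compute_scale_down_enabled = true
  compute_min_instance_size  = "M10" # floor when scaling down
  compute_max_instance_size  = "M40" # ceiling when scaling up
}
```

Pair this with the `lifecycle` customization above so that an instance size chosen by Atlas is not reverted on the next apply.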
26 changes: 13 additions & 13 deletions website/docs/r/backup_compliance_policy.html.markdown
Expand Up @@ -29,7 +29,7 @@ resource "mongodbatlas_cluster" "my_cluster" {
provider_name = "AWS"
provider_region_name = "EU_CENTRAL_1"
provider_instance_size_name = "M10"
- provider_backup_enabled = true // enable cloud backup snapshots
+ cloud_backup = true // enable cloud backup snapshots
}

resource "mongodbatlas_cloud_backup_schedule" "test" {
@@ -44,23 +44,23 @@ resource "mongodbatlas_cloud_backup_schedule" "test" {
policy_item_hourly {
frequency_interval = 1 #accepted values = 1, 2, 4, 6, 8, 12 -> every n hours
retention_unit = "days"
- retention_value = 1
+ retention_value = 7
}
policy_item_daily {
frequency_interval = 1 #accepted values = 1 -> every 1 day
retention_unit = "days"
- retention_value = 2
+ retention_value = 7
}
policy_item_weekly {
- frequency_interval = 4 # accepted values = 1 to 7 -> every 1=Monday,2=Tuesday,3=Wednesday,4=Thursday,5=Friday,6=Saturday,7=Sunday day of the week
+ frequency_interval = 1 # accepted values = 1 to 7 -> every 1=Monday,2=Tuesday,3=Wednesday,4=Thursday,5=Friday,6=Saturday,7=Sunday day of the week
retention_unit = "weeks"
- retention_value = 3
+ retention_value = 4
}
policy_item_monthly {
- frequency_interval = 5 # accepted values = 1 to 28 -> 1 to 28 every nth day of the month
+ frequency_interval = 1 # accepted values = 1 to 28 -> 1 to 28 every nth day of the month
# accepted values = 40 -> every last day of the month
retention_unit = "months"
- retention_value = 4
+ retention_value = 12
}

}
@@ -71,7 +71,7 @@ data "mongodbatlas_cloud_backup_schedule" "test" {
}

data "mongodbatlas_backup_compliance_policy" "backup_policy" {
- project_id = mongodbatlas_cloud_backup_schedule.test.id
+ project_id = mongodbatlas_cloud_backup_schedule.test.project_id
}

resource "mongodbatlas_backup_compliance_policy" "backup_policy" {
@@ -84,31 +84,31 @@ resource "mongodbatlas_backup_compliance_policy" "backup_policy" {
restore_window_days = 7

on_demand_policy_item {
- frequency_interval = 0
+ frequency_interval = 1
retention_unit = "days"
retention_value = 3
}

policy_item_hourly {
- frequency_interval = 6
+ frequency_interval = 1
retention_unit = "days"
retention_value = 7
}

policy_item_daily {
- frequency_interval = 0
+ frequency_interval = 1
retention_unit = "days"
retention_value = 7
}

policy_item_weekly {
- frequency_interval = 0
+ frequency_interval = 1
retention_unit = "weeks"
retention_value = 4
}

policy_item_monthly {
- frequency_interval = 0
+ frequency_interval = 1
retention_unit = "months"
retention_value = 12
}
10 changes: 9 additions & 1 deletion website/docs/r/cluster.html.markdown
@@ -287,14 +287,22 @@ Refer to the following for full privatelink endpoint connection string examples:
- Set to `true` to enable disk auto-scaling.
- Set to `false` to disable disk auto-scaling.

+ ~> **IMPORTANT:** If `disk_gb_enabled` is true, Atlas automatically scales the disk size up and down.
+ As a result, the `disk_size_gb` value returned may differ from the value specified in the Terraform config, and if a plan is then applied without noting this, Terraform will scale the cluster disk size back to the original `disk_size_gb` value.
+ To prevent this, use a lifecycle customization, e.g.:
+ `lifecycle {
+ ignore_changes = [disk_size_gb]
+ }`
+ After adding the `lifecycle` block, to explicitly change `disk_size_gb`, comment out the `lifecycle` block and run `terraform apply`. Be sure to uncomment the `lifecycle` block once done to prevent accidental changes.

-> **NOTE:** If `provider_name` is set to `TENANT`, the parameter `auto_scaling_disk_gb_enabled` will be ignored.

* `auto_scaling_compute_enabled` - (Optional) Specifies whether cluster tier auto-scaling is enabled. The default is false.
- Set to `true` to enable cluster tier auto-scaling. If enabled, you must specify a value for `providerSettings.autoScaling.compute.maxInstanceSize`.
- Set to `false` to disable cluster tier auto-scaling.

~> **IMPORTANT:** If `auto_scaling_compute_enabled` is true, then Atlas will automatically scale up to the maximum provided and down to the minimum, if provided.
- This will cause the value of `provider_instance_size_name` returned to potential be different than what is specified in the Terraform config and if one then applies a plan, not noting this, Terraform will scale the cluster back down to the original instanceSizeName value.
+ This will cause the value of `provider_instance_size_name` returned to potentially be different than what is specified in the Terraform config and if one then applies a plan, not noting this, Terraform will scale the cluster back to the original instanceSizeName value.
To prevent this a lifecycle customization should be used, i.e.:
`lifecycle {
ignore_changes = [provider_instance_size_name]
}`
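Putting the disk and compute notes together for the legacy `mongodbatlas_cluster` resource, a hedged sketch (attribute values are illustrative assumptions):

```hcl
resource "mongodbatlas_cluster" "example" {
  project_id                  = var.project_id # assumed to be defined elsewhere
  name                        = "example-cluster"
  provider_name               = "AWS"
  provider_region_name        = "EU_CENTRAL_1"
  provider_instance_size_name = "M10"

  auto_scaling_disk_gb_enabled                    = true
  auto_scaling_compute_enabled                    = true
  auto_scaling_compute_scale_down_enabled         = true
  provider_auto_scaling_compute_min_instance_size = "M10"
  provider_auto_scaling_compute_max_instance_size = "M40"

  lifecycle {
    # Atlas may adjust both the disk size and the tier; ignore those drifts.
    ignore_changes = [disk_size_gb, provider_instance_size_name]
  }
}
```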