Group role member feature #101
Merged (9 commits) on Jun 15, 2020
38 changes: 21 additions & 17 deletions databricks/provider.go
@@ -26,23 +26,27 @@ func Provider(version string) terraform.ResourceProvider {
"databricks_zones": dataSourceClusterZones(),
},
ResourcesMap: map[string]*schema.Resource{
"databricks_token": resourceToken(),
"databricks_secret_scope": resourceSecretScope(),
"databricks_secret": resourceSecret(),
"databricks_secret_acl": resourceSecretACL(),
"databricks_instance_pool": resourceInstancePool(),
"databricks_scim_user": resourceScimUser(),
"databricks_scim_group": resourceScimGroup(),
"databricks_notebook": resourceNotebook(),
"databricks_cluster": resourceCluster(),
"databricks_job": resourceJob(),
"databricks_dbfs_file": resourceDBFSFile(),
"databricks_dbfs_file_sync": resourceDBFSFileSync(),
"databricks_instance_profile": resourceInstanceProfile(),
"databricks_aws_s3_mount": resourceAWSS3Mount(),
"databricks_azure_blob_mount": resourceAzureBlobMount(),
"databricks_azure_adls_gen1_mount": resourceAzureAdlsGen1Mount(),
"databricks_azure_adls_gen2_mount": resourceAzureAdlsGen2Mount(),
"databricks_token": resourceToken(),
"databricks_secret_scope": resourceSecretScope(),
"databricks_secret": resourceSecret(),
"databricks_secret_acl": resourceSecretACL(),
"databricks_instance_pool": resourceInstancePool(),
"databricks_scim_user": resourceScimUser(),
"databricks_scim_group": resourceScimGroup(),
// SCIM group management is split into multiple resources so that entitlements, instance profiles, and members can be picked and managed independently
"databricks_group": resourceGroup(),
"databricks_group_instance_profile": resourceGroupInstanceProfile(),
"databricks_group_member": resourceGroupMember(),
"databricks_notebook": resourceNotebook(),
"databricks_cluster": resourceCluster(),
"databricks_job": resourceJob(),
"databricks_dbfs_file": resourceDBFSFile(),
"databricks_dbfs_file_sync": resourceDBFSFileSync(),
"databricks_instance_profile": resourceInstanceProfile(),
"databricks_aws_s3_mount": resourceAWSS3Mount(),
"databricks_azure_blob_mount": resourceAzureBlobMount(),
"databricks_azure_adls_gen1_mount": resourceAzureAdlsGen1Mount(),
"databricks_azure_adls_gen2_mount": resourceAzureAdlsGen2Mount(),
// MWS (multiple workspaces) resources are limited to AWS, as Azure already has a built-in concept of multiple workspaces
"databricks_mws_credentials": resourceMWSCredentials(),
"databricks_mws_storage_configurations": resourceMWSStorageConfigurations(),
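For context, a minimal Terraform configuration wiring the new resources together might look like the sketch below. The databricks_group arguments follow the schema added in this PR; the argument names on databricks_group_member and databricks_group_instance_profile (group_id, user_id, instance_profile_id) are illustrative assumptions, since those schemas are not part of this diff.

resource "databricks_group" "analysts" {
  display_name               = "analysts"
  allow_cluster_create       = true
  allow_instance_pool_create = true
}

# group_id/user_id argument names are assumed for illustration
resource "databricks_group_member" "me" {
  group_id = databricks_group.analysts.id
  user_id  = databricks_scim_user.me.id
}

# group_id/instance_profile_id argument names are assumed for illustration
resource "databricks_group_instance_profile" "ip" {
  group_id            = databricks_group.analysts.id
  instance_profile_id = databricks_instance_profile.this.id
}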
150 changes: 150 additions & 0 deletions databricks/resource_databricks_group.go
@@ -0,0 +1,150 @@
package databricks

import (
"log"

"github.com/databrickslabs/databricks-terraform/client/model"
"github.com/databrickslabs/databricks-terraform/client/service"
"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
)

func resourceGroup() *schema.Resource {
return &schema.Resource{
Create: resourceGroupCreate,
Update: resourceGroupUpdate,
Read: resourceGroupRead,
Delete: resourceGroupDelete,

Schema: map[string]*schema.Schema{
"display_name": {
Type: schema.TypeString,
ForceNew: true,
Required: true,
},
"allow_cluster_create": {
Type: schema.TypeBool,
Optional: true,
},
"allow_instance_pool_create": {
Type: schema.TypeBool,
Optional: true,
},
},
Importer: &schema.ResourceImporter{
State: schema.ImportStatePassthrough,
},
}
}

func resourceGroupCreate(d *schema.ResourceData, m interface{}) error {
client := m.(*service.DBApiClient)
groupName := d.Get("display_name").(string)
allowClusterCreate := d.Get("allow_cluster_create").(bool)
allowInstancePoolCreate := d.Get("allow_instance_pool_create").(bool)

// Build the entitlements list from the boolean flags that are set to true
var entitlementsList []string
if allowClusterCreate {
entitlementsList = append(entitlementsList, string(model.AllowClusterCreateEntitlement))
}
if allowInstancePoolCreate {
entitlementsList = append(entitlementsList, string(model.AllowInstancePoolCreateEntitlement))
}

group, err := client.Groups().Create(groupName, nil, nil, entitlementsList)
if err != nil {
return err
}
d.SetId(group.ID)
return resourceGroupRead(d, m)
}

func resourceGroupRead(d *schema.ResourceData, m interface{}) error {
id := d.Id()
client := m.(*service.DBApiClient)
group, err := client.Groups().Read(id)
if err != nil {
if isScimGroupMissing(err.Error(), id) {
log.Printf("Missing scim group with id: %s.", id)
d.SetId("")
return nil
}
return err
}

err = d.Set("display_name", group.DisplayName)
if err != nil {
return err
}

err = d.Set("allow_cluster_create", isGroupClusterCreateEntitled(&group))
if err != nil {
return err
}

err = d.Set("allow_instance_pool_create", isGroupInstancePoolCreateEntitled(&group))
return err
}

func resourceGroupUpdate(d *schema.ResourceData, m interface{}) error {
id := d.Id()
client := m.(*service.DBApiClient)

// Handle entitlements update
var entitlementsAddList []string
var entitlementsRemoveList []string
// If allow_cluster_create has changed
if d.HasChange("allow_cluster_create") {
allowClusterCreate := d.Get("allow_cluster_create").(bool)
if allowClusterCreate {
// Changed to true: add the entitlement
entitlementsAddList = append(entitlementsAddList, string(model.AllowClusterCreateEntitlement))
} else {
// Changed to false: remove the entitlement
entitlementsRemoveList = append(entitlementsRemoveList, string(model.AllowClusterCreateEntitlement))
}
}
// If allow_instance_pool_create has changed
if d.HasChange("allow_instance_pool_create") {
allowInstancePoolCreate := d.Get("allow_instance_pool_create").(bool)
if allowInstancePoolCreate {
// Changed to true: add the entitlement
entitlementsAddList = append(entitlementsAddList, string(model.AllowInstancePoolCreateEntitlement))
} else {
// Changed to false: remove the entitlement
entitlementsRemoveList = append(entitlementsRemoveList, string(model.AllowInstancePoolCreateEntitlement))
}
}

if entitlementsAddList != nil || entitlementsRemoveList != nil {
err := client.Groups().Patch(id, entitlementsAddList, entitlementsRemoveList, model.GroupEntitlementsPath)
if err != nil {
return err
}
}

return nil
}

func resourceGroupDelete(d *schema.ResourceData, m interface{}) error {
id := d.Id()
client := m.(*service.DBApiClient)
err := client.Groups().Delete(id)
return err
}

func isGroupClusterCreateEntitled(group *model.Group) bool {
for _, entitlement := range group.Entitlements {
if entitlement.Value == model.AllowClusterCreateEntitlement {
return true
}
}
return false
}

func isGroupInstancePoolCreateEntitled(group *model.Group) bool {
for _, entitlement := range group.Entitlements {
if entitlement.Value == model.AllowInstancePoolCreateEntitlement {
return true
}
}
return false
}
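Since the resource registers schema.ImportStatePassthrough and stores the SCIM group ID as the Terraform ID, an existing group should be importable with a command along the lines of terraform import databricks_group.my_group <scim-group-id>, where the ID placeholder stands for the group's SCIM ID.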
163 changes: 163 additions & 0 deletions databricks/resource_databricks_group_aws_test.go
@@ -0,0 +1,163 @@
package databricks

import (
"errors"
"fmt"
"testing"

"github.com/databrickslabs/databricks-terraform/client/model"
"github.com/databrickslabs/databricks-terraform/client/service"
"github.com/hashicorp/terraform-plugin-sdk/helper/acctest"
"github.com/hashicorp/terraform-plugin-sdk/helper/resource"
"github.com/hashicorp/terraform-plugin-sdk/terraform"
"github.com/stretchr/testify/assert"
)

func TestAccAWSGroupResource(t *testing.T) {
var Group model.Group
// generate a random name for each group test run, to avoid
// collisions from multiple concurrent tests.
// the acctest package includes many helpers such as RandStringFromCharSet
// See https://godoc.org/github.com/hashicorp/terraform-plugin-sdk/helper/acctest
randomStr := acctest.RandStringFromCharSet(5, acctest.CharSetAlphaNum)
displayName := fmt.Sprintf("tf group test %s", randomStr)
newDisplayName := fmt.Sprintf("new tf group test %s", randomStr)
resource.Test(t, resource.TestCase{
Providers: testAccProviders,
CheckDestroy: testAWSGroupResourceDestroy,
Steps: []resource.TestStep{
{
// use a dynamic configuration with the random name from above
Config: testAWSDatabricksGroup(displayName),
// compose a basic test, checking both remote and local values
Check: resource.ComposeTestCheckFunc(
// query the API to retrieve the group object
testAWSGroupResourceExists("databricks_group.my_group", &Group, t),
// verify remote values
testAWSGroupValues(t, &Group, displayName),
// verify local values
resource.TestCheckResourceAttr("databricks_group.my_group", "display_name", displayName),
),
Destroy: false,
},
{
// use a dynamic configuration with the random name from above
Config: testAWSDatabricksGroup(newDisplayName),
// verify that changing the display name forces a new resource to be planned
PlanOnly: true,
ExpectNonEmptyPlan: true,
Destroy: false,
},
{
ResourceName: "databricks_group.my_group",
ImportState: true,
ImportStateVerify: true,
},
},
})
}

func TestAccAWSGroupResource_verify_entitlements(t *testing.T) {
var Group model.Group
// generate a random name for each group test run, to avoid
// collisions from multiple concurrent tests.
// the acctest package includes many helpers such as RandStringFromCharSet
// See https://godoc.org/github.com/hashicorp/terraform-plugin-sdk/helper/acctest
randomStr := acctest.RandStringFromCharSet(5, acctest.CharSetAlphaNum)
displayName := fmt.Sprintf("tf group test %s", randomStr)
newDisplayName := fmt.Sprintf("new tf group test %s", randomStr)
resource.Test(t, resource.TestCase{
Providers: testAccProviders,
CheckDestroy: testAWSGroupResourceDestroy,
Steps: []resource.TestStep{
{
// use a dynamic configuration with the random name from above
Config: testAWSDatabricksGroupEntitlements(displayName, "true", "true"),
// compose a basic test, checking both remote and local values
Check: resource.ComposeTestCheckFunc(
// query the API to retrieve the group object
testAWSGroupResourceExists("databricks_group.my_group", &Group, t),
// verify remote values
testAWSGroupValues(t, &Group, displayName),
// verify local values
resource.TestCheckResourceAttr("databricks_group.my_group", "allow_cluster_create", "true"),
resource.TestCheckResourceAttr("databricks_group.my_group", "allow_instance_pool_create", "true"),
),
Destroy: false,
},
// Remove entitlements and expect a non-empty plan
{
// use a dynamic configuration with the random name from above
Config: testAWSDatabricksGroup(newDisplayName),
// verify that the configuration change produces a non-empty plan
PlanOnly: true,
ExpectNonEmptyPlan: true,
Destroy: false,
},
},
})
}

func testAWSGroupResourceDestroy(s *terraform.State) error {
client := testAccProvider.Meta().(*service.DBApiClient)
for _, rs := range s.RootModule().Resources {
if rs.Type != "databricks_group" {
continue
}
_, err := client.Groups().Read(rs.Primary.ID)
if err != nil {
return nil
}
return errors.New("resource Group is not cleaned up")
}
return nil
}

func testAWSGroupValues(t *testing.T, group *model.Group, displayName string) resource.TestCheckFunc {
return func(s *terraform.State) error {
assert.True(t, group.DisplayName == displayName)
return nil
}
}

// testAWSGroupResourceExists queries the API and retrieves the matching group.
func testAWSGroupResourceExists(n string, group *model.Group, t *testing.T) resource.TestCheckFunc {
return func(s *terraform.State) error {
// find the corresponding state object
rs, ok := s.RootModule().Resources[n]
if !ok {
return fmt.Errorf("Not found: %s", n)
}

// retrieve the configured client from the test setup
conn := testAccProvider.Meta().(*service.DBApiClient)
resp, err := conn.Groups().Read(rs.Primary.ID)
if err != nil {
return err
}

// If no error, assign the response group to the group pointer
*group = resp
return nil
}
}

func testAWSDatabricksGroup(groupName string) string {
return fmt.Sprintf(`
resource "databricks_group" "my_group" {
display_name = "%s"
}
`, groupName)
}

func testAWSDatabricksGroupEntitlements(groupName, allowClusterCreate, allowPoolCreate string) string {
return fmt.Sprintf(`
resource "databricks_group" "my_group" {
display_name = "%s"
allow_cluster_create = %s
allow_instance_pool_create = %s
}
`, groupName, allowClusterCreate, allowPoolCreate)
}
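Note that these are terraform-plugin-sdk acceptance tests, so resource.Test only runs them when the TF_ACC environment variable is set; a typical invocation (exact flags and make targets in this repo may differ) would be something like TF_ACC=1 go test ./databricks/ -v -run TestAccAWSGroupResource, with valid Databricks workspace credentials available in the environment.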