Skip to content

Commit 0a932c2

Browse files
pietern and tanmay-db authored
[Fix] Make spark_version field optional to work with defaults in policies (#4643)
## Changes A cluster policy can enforce a specific `spark_version` field and set it as the default. This mechanism allows for a centralized choice of the Databricks Runtime version across all jobs. To allow a job to inherit this field from a policy, it must be configured as optional in the schema. The job resource referred to `JobSettings` and `JobSettingsResource` with a mix of `js` and `jsr` variable names. This PR updates references to `JobSettingsResource` to be called `jsr`. ## Tests - [x] `make test` run locally - [ ] relevant change in `docs/` folder --------- Co-authored-by: Tanmay Rustagi <[email protected]>
1 parent 01a758b commit 0a932c2

File tree

5 files changed

+82
-21
lines changed

5 files changed

+82
-21
lines changed

NEXT_CHANGELOG.md

+2-1
Original file line numberDiff line numberDiff line change
@@ -7,7 +7,8 @@
77
* Add support for `power_bi_task` in jobs ([#4647](https://github.com/databricks/terraform-provider-databricks/pull/4647))
88
* Add support for `dashboard_task` in jobs ([#4646](https://github.com/databricks/terraform-provider-databricks/pull/4646))
99
* Add `compute_mode` to `databricks_mws_workspaces` to support creating serverless workspaces ([#4670](https://github.com/databricks/terraform-provider-databricks/pull/4670)).
10-
10+
* Make `spark_version` optional in the context of jobs such that a cluster policy can provide a default value ([#4643](https://github.com/databricks/terraform-provider-databricks/pull/4643))
11+
1112
### Bug Fixes
1213

1314
### Documentation

clusters/clusters_api.go

+1-1
Original file line numberDiff line numberDiff line change
@@ -399,7 +399,7 @@ type Cluster struct {
399399
ClusterID string `json:"cluster_id,omitempty"`
400400
ClusterName string `json:"cluster_name,omitempty"`
401401

402-
SparkVersion string `json:"spark_version"`
402+
SparkVersion string `json:"spark_version,omitempty"`
403403
NumWorkers int32 `json:"num_workers" tf:"group:size"`
404404
Autoscale *AutoScale `json:"autoscale,omitempty" tf:"group:size"`
405405
EnableElasticDisk bool `json:"enable_elastic_disk,omitempty" tf:"computed"`

clusters/resource_cluster.go

+1-1
Original file line numberDiff line numberDiff line change
@@ -314,6 +314,7 @@ func (ClusterSpec) CustomizeSchemaResourceSpecific(s *common.CustomizableSchema)
314314
Optional: true,
315315
Default: 60,
316316
})
317+
s.SchemaPath("spark_version").SetRequired()
317318
return s
318319
}
319320

@@ -356,7 +357,6 @@ func (ClusterSpec) CustomizeSchema(s *common.CustomizableSchema) *common.Customi
356357
s.SchemaPath("cluster_log_conf", "dbfs", "destination").SetRequired()
357358
s.SchemaPath("cluster_log_conf", "s3", "destination").SetRequired()
358359
s.SchemaPath("cluster_log_conf", "volumes", "destination").SetRequired()
359-
s.SchemaPath("spark_version").SetRequired()
360360
s.AddNewField("cluster_id", &schema.Schema{
361361
Type: schema.TypeString,
362362
Computed: true,

jobs/job_test.go

+61
Original file line numberDiff line numberDiff line change
@@ -462,3 +462,64 @@ func TestAccJobDashboardTask(t *testing.T) {
462462
}`,
463463
})
464464
}
465+
466+
func TestAccJobClusterPolicySparkVersion(t *testing.T) {
467+
acceptance.WorkspaceLevel(t, acceptance.Step{
468+
Template: `
469+
data "databricks_current_user" "me" {}
470+
data "databricks_spark_version" "latest" {}
471+
data "databricks_node_type" "smallest" {
472+
local_disk = true
473+
}
474+
resource "databricks_notebook" "this" {
475+
path = "${data.databricks_current_user.me.home}/Terraform{var.RANDOM}"
476+
language = "PYTHON"
477+
content_base64 = base64encode(<<-EOT
478+
# created from ${abspath(path.module)}
479+
display(spark.range(10))
480+
EOT
481+
)
482+
}
483+
resource "databricks_cluster_policy" "this" {
484+
name = "test-policy-{var.RANDOM}"
485+
definition = jsonencode({
486+
"spark_version": {
487+
"type": "fixed",
488+
"value": data.databricks_spark_version.latest.id
489+
}
490+
})
491+
}
492+
resource "databricks_job" "this" {
493+
name = "test-job-{var.RANDOM}"
494+
job_cluster {
495+
job_cluster_key = "test-cluster"
496+
new_cluster {
497+
num_workers = 0
498+
node_type_id = data.databricks_node_type.smallest.id
499+
custom_tags = {
500+
"ResourceClass" = "SingleNode"
501+
}
502+
spark_conf = {
503+
"spark.databricks.cluster.profile" : "singleNode"
504+
"spark.master" : "local[*,4]"
505+
}
506+
507+
// Apply the cluster policy to the job cluster for the Spark version.
508+
policy_id = databricks_cluster_policy.this.id
509+
apply_policy_default_values = true
510+
}
511+
}
512+
task {
513+
task_key = "test-task"
514+
job_cluster_key = "test-cluster"
515+
notebook_task {
516+
notebook_path = databricks_notebook.this.path
517+
}
518+
}
519+
}
520+
`,
521+
// The configuration uses "apply_policy_default_values = true" to set the Spark version.
522+
// This means permanent drift will occur for the values sourced from the policy.
523+
ExpectNonEmptyPlan: true,
524+
})
525+
}

jobs/resource_job.go

+17-18
Original file line numberDiff line numberDiff line change
@@ -334,10 +334,6 @@ type JobSettings struct {
334334
EditMode jobs.JobEditMode `json:"edit_mode,omitempty"`
335335
}
336336

337-
func (js *JobSettings) isMultiTask() bool {
338-
return js.Format == "MULTI_TASK" || len(js.Tasks) > 0
339-
}
340-
341337
func (js *JobSettings) sortTasksByKey() {
342338
sort.Slice(js.Tasks, func(i, j int) bool {
343339
return js.Tasks[i].TaskKey < js.Tasks[j].TaskKey
@@ -1040,9 +1036,9 @@ var jobsGoSdkSchema = common.StructToSchema(JobSettingsResource{}, nil)
10401036

10411037
func ResourceJob() common.Resource {
10421038
getReadCtx := func(ctx context.Context, d *schema.ResourceData) context.Context {
1043-
var js JobSettingsResource
1044-
common.DataToStructPointer(d, jobsGoSdkSchema, &js)
1045-
if js.isMultiTask() {
1039+
var jsr JobSettingsResource
1040+
common.DataToStructPointer(d, jobsGoSdkSchema, &jsr)
1041+
if jsr.isMultiTask() {
10461042
return context.WithValue(ctx, common.Api, common.API_2_1)
10471043
}
10481044
return ctx
@@ -1055,27 +1051,27 @@ func ResourceJob() common.Resource {
10551051
Update: schema.DefaultTimeout(clusters.DefaultProvisionTimeout),
10561052
},
10571053
CustomizeDiff: func(ctx context.Context, d *schema.ResourceDiff) error {
1058-
var js JobSettingsResource
1059-
common.DiffToStructPointer(d, jobsGoSdkSchema, &js)
1054+
var jsr JobSettingsResource
1055+
common.DiffToStructPointer(d, jobsGoSdkSchema, &jsr)
10601056
alwaysRunning := d.Get("always_running").(bool)
1061-
if alwaysRunning && js.MaxConcurrentRuns > 1 {
1057+
if alwaysRunning && jsr.MaxConcurrentRuns > 1 {
10621058
return fmt.Errorf("`always_running` must be specified only with `max_concurrent_runs = 1`")
10631059
}
10641060
controlRunState := d.Get("control_run_state").(bool)
10651061
if controlRunState {
1066-
if js.Continuous == nil {
1062+
if jsr.Continuous == nil {
10671063
return fmt.Errorf("`control_run_state` must be specified only with `continuous`")
10681064
}
1069-
if js.MaxConcurrentRuns > 1 {
1065+
if jsr.MaxConcurrentRuns > 1 {
10701066
return fmt.Errorf("`control_run_state` must be specified only with `max_concurrent_runs = 1`")
10711067
}
10721068
}
10731069
return nil
10741070
},
10751071
Create: func(ctx context.Context, d *schema.ResourceData, c *common.DatabricksClient) error {
1076-
var js JobSettings
1077-
common.DataToStructPointer(d, jobsGoSdkSchema, &js)
1078-
if js.isMultiTask() {
1072+
var jsr JobSettingsResource
1073+
common.DataToStructPointer(d, jobsGoSdkSchema, &jsr)
1074+
if jsr.isMultiTask() {
10791075
// Api 2.1
10801076
w, err := c.WorkspaceClient()
10811077
if err != nil {
@@ -1096,6 +1092,9 @@ func ResourceJob() common.Resource {
10961092
} else {
10971093
// Api 2.0
10981094
// TODO: Deprecate and remove this code path
1095+
var js JobSettings
1096+
common.DataToStructPointer(d, jobsGoSdkSchema, &js)
1097+
10991098
jobsAPI := NewJobsAPI(ctx, c)
11001099
job, err := jobsAPI.Create(js)
11011100
if err != nil {
@@ -1106,9 +1105,9 @@ func ResourceJob() common.Resource {
11061105
}
11071106
},
11081107
Read: func(ctx context.Context, d *schema.ResourceData, c *common.DatabricksClient) error {
1109-
var js JobSettingsResource
1110-
common.DataToStructPointer(d, jobsGoSdkSchema, &js)
1111-
if js.isMultiTask() {
1108+
var jsr JobSettingsResource
1109+
common.DataToStructPointer(d, jobsGoSdkSchema, &jsr)
1110+
if jsr.isMultiTask() {
11121111
// Api 2.1
11131112
w, err := c.WorkspaceClient()
11141113
if err != nil {

0 commit comments

Comments (0)