
Commit 4ef7b70

python formatting and remove s from scala non formatted string
1 parent 2a91430 commit 4ef7b70

File tree

2 files changed (+11, -28 lines)


api/py/ai/chronon/repo/run.py

Lines changed: 10 additions & 27 deletions
@@ -214,9 +214,7 @@ def download_jar(
     spark_version="2.4.0",
     skip_download=False,
 ):
-    assert (
-        spark_version in SUPPORTED_SPARK
-    ), f"Received unsupported spark version {spark_version}. Supported spark versions are {SUPPORTED_SPARK}"
+    assert (spark_version in SUPPORTED_SPARK), f"Received unsupported spark version {spark_version}. Supported spark versions are {SUPPORTED_SPARK}"
     scala_version = SCALA_VERSION_FOR_SPARK[spark_version]
     maven_url_prefix = os.environ.get("CHRONON_MAVEN_MIRROR_PREFIX", None)
     default_url_prefix = (
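
For reference, the assert collapsed above is a lookup-table guard: validate the requested Spark version against the supported set, then map it to its Scala build version. A minimal sketch of the pattern; the dictionary contents and the wrapper function are assumptions for illustration, while SUPPORTED_SPARK, SCALA_VERSION_FOR_SPARK, and the assert itself come from the diff:

# Hypothetical values; only the two names below appear in the diff.
SCALA_VERSION_FOR_SPARK = {"2.4.0": "2.11", "3.1.1": "2.12"}
SUPPORTED_SPARK = list(SCALA_VERSION_FOR_SPARK.keys())

def resolve_scala_version(spark_version="2.4.0"):  # hypothetical wrapper
    # Single-line form of the guard, as in the new code above.
    assert (spark_version in SUPPORTED_SPARK), f"Received unsupported spark version {spark_version}. Supported spark versions are {SUPPORTED_SPARK}"
    return SCALA_VERSION_FOR_SPARK[spark_version]
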
@@ -372,8 +370,7 @@ def set_runtime_env(params):
             for k in [
                 "chronon",
                 conf_type,
-                params["mode"].replace(
-                    "-", "_") if params["mode"] else None,
+                params["mode"].replace("-", "_") if params["mode"] else None,
             ]
             if k is not None
         ]
@@ -432,9 +429,7 @@ def __init__(self, args, jar_path):
                 raise e
             possible_modes = list(
                 ROUTES[self.conf_type].keys()) + UNIVERSAL_ROUTES
-            assert (
-                args["mode"] in possible_modes
-            ), "Invalid mode:{} for conf:{} of type:{}, please choose from {}".format(
+            assert (args["mode"] in possible_modes), "Invalid mode:{} for conf:{} of type:{}, please choose from {}".format(
                 args["mode"], self.conf, self.conf_type, possible_modes
             )
         else:
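
The mode guard above follows the same table-driven pattern: the valid modes for a conf type are the keys of its ROUTES entry plus the universal ones. A minimal sketch with hypothetical table contents (ROUTES, UNIVERSAL_ROUTES, and the assert come from the diff; the values below are assumptions):

# Hypothetical contents for illustration; only the names come from the diff.
ROUTES = {"group_bys": {"backfill": "group-by-backfill", "upload": "group-by-upload"}}
UNIVERSAL_ROUTES = ["info"]

conf_type, mode, conf = "group_bys", "backfill", "my_conf"
possible_modes = list(ROUTES[conf_type].keys()) + UNIVERSAL_ROUTES
assert (mode in possible_modes), "Invalid mode:{} for conf:{} of type:{}, please choose from {}".format(
    mode, conf, conf_type, possible_modes
)
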
@@ -518,9 +513,7 @@ def run(self):
                     )
                 )
                 if self.mode == "streaming":
-                    assert (
-                        len(filtered_apps) == 1
-                    ), "More than one found, please kill them all"
+                    assert (len(filtered_apps) == 1), "More than one found, please kill them all"
                     print("All good. No need to start a new app.")
                     return
                 elif self.mode == "streaming-client":
@@ -556,10 +549,7 @@ def run(self):
                     )
                     for start_ds, end_ds in date_ranges:
                         if not self.dataproc:
-                            command = (
-                                "bash {script} --class ai.chronon.spark.Driver {jar} {subcommand} {args} "
-                                + "{additional_args}"
-                            ).format(
+                            command = ("bash {script} --class ai.chronon.spark.Driver {jar} {subcommand} {args} " + "{additional_args}").format(
                                 script=self.spark_submit,
                                 jar=self.jar_path,
                                 subcommand=ROUTES[self.conf_type][self.mode],
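
The collapsed template above is plain str.format assembly of a spark-submit invocation. A sketch of how it expands; every concrete value below is a hypothetical stand-in for the instance attributes used in the diff:

# All values are assumed placeholders for self.spark_submit, self.jar_path, etc.
command = ("bash {script} --class ai.chronon.spark.Driver {jar} {subcommand} {args} " + "{additional_args}").format(
    script="scripts/spark_submit.sh",  # self.spark_submit
    jar="chronon_spark.jar",           # self.jar_path
    subcommand="group-by-backfill",    # ROUTES[self.conf_type][self.mode]
    args="--conf-path=production/group_bys/my_team/my_group_by",  # assumed
    additional_args="--step-days=30",  # assumed
)
# command == "bash scripts/spark_submit.sh --class ai.chronon.spark.Driver chronon_spark.jar group-by-backfill --conf-path=production/group_bys/my_team/my_group_by --step-days=30"
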
@@ -593,9 +583,6 @@ def run(self):
                         if self.conf:
                             local_files_to_upload_to_gcs.append(
                                 self.conf)
-                            # upload teams.json to gcs
-                            local_files_to_upload_to_gcs.append(
-                                get_teams_json_file_path(self.repo))
 
                         dataproc_command = generate_dataproc_submitter_args(
                             local_files_to_upload_to_gcs=[self.conf],
@@ -608,8 +595,8 @@ def run(self):
         else:
             if not self.dataproc:
                 command = (
-                    "bash {script} --class ai.chronon.spark.Driver {jar} {subcommand} {args} " +
-                    "{additional_args}"
+                    "bash {script} --class ai.chronon.spark.Driver {jar} {subcommand} {args} " +
+                    "{additional_args}"
                 ).format(
                     script=self.spark_submit,
                     jar=self.jar_path,

(The removed and added lines in this hunk are textually identical; the change is whitespace-only, which the extracted page does not preserve.)
@@ -641,10 +628,6 @@ def run(self):
             if self.conf:
                 local_files_to_upload_to_gcs.append(self.conf)
 
-            # upload teams.json to gcs
-            local_files_to_upload_to_gcs.append(
-                get_teams_json_file_path(self.repo))
-
             dataproc_command = generate_dataproc_submitter_args(
                 # for now, self.conf is the only local file that requires uploading to gcs
                 local_files_to_upload_to_gcs=local_files_to_upload_to_gcs,
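
Beyond formatting, the removals in this and the earlier Dataproc hunk change behavior: teams.json is no longer uploaded to GCS, leaving self.conf as the only local file shipped for Dataproc submission, which matches the comment retained in this hunk.
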
@@ -677,7 +660,7 @@ def _gen_final_args(self, start_ds=None, end_ds=None, override_conf_path=None):
         )
 
         final_args = base_args + " " + \
-            str(self.args) + override_start_partition_arg
+            str(self.args) + override_start_partition_arg
 
         return final_args
 
(Whitespace-only change, likely a re-indent of the continuation line; the difference is not visible in the extracted text.)
@@ -692,8 +675,8 @@ def split_date_range(start_date, end_date, parallelism):
     if start_date > end_date:
         raise ValueError("Start date should be earlier than end date")
     total_days = (
-        end_date - start_date
-    ).days + 1  # +1 to include the end_date in the range
+        end_date - start_date
+    ).days + 1  # +1 to include the end_date in the range
 
     # Check if parallelism is greater than total_days
     if parallelism > total_days:

(Whitespace-only change, not visible in the extracted text.)
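
The hunk above touches only whitespace in split_date_range, but its context outlines the function's contract: validate an inclusive date range, count its days, and split the work across parallelism chunks. A sketch of a complete implementation under that contract follows; the signature, the ValueError message, the inclusive +1, and the parallelism check come from the diff, while the date format and the chunking loop are assumptions:

from datetime import datetime, timedelta

def split_date_range(start_date, end_date, parallelism):
    # Dates assumed to arrive as "YYYY-MM-DD" strings.
    start_date = datetime.strptime(start_date, "%Y-%m-%d")
    end_date = datetime.strptime(end_date, "%Y-%m-%d")
    if start_date > end_date:
        raise ValueError("Start date should be earlier than end date")
    total_days = (
        end_date - start_date
    ).days + 1  # +1 to include the end_date in the range

    # Check if parallelism is greater than total_days
    if parallelism > total_days:
        # The diff does not show what follows this check; raising is an assumption.
        raise ValueError("Parallelism should be less than or equal to total days")

    base_size, remainder = divmod(total_days, parallelism)
    ranges, cursor = [], start_date
    for i in range(parallelism):
        # Spread the leftover days one at a time over the first chunks.
        days = base_size + (1 if i < remainder else 0)
        chunk_end = cursor + timedelta(days=days - 1)
        ranges.append((cursor.strftime("%Y-%m-%d"), chunk_end.strftime("%Y-%m-%d")))
        cursor = chunk_end + timedelta(days=1)
    return ranges

With this sketch, split_date_range("2024-01-01", "2024-01-10", 3) yields [("2024-01-01", "2024-01-04"), ("2024-01-05", "2024-01-07"), ("2024-01-08", "2024-01-10")]: ten inclusive days split into chunks of 4, 3, and 3.
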

cloud_gcp/src/main/scala/ai/chronon/integrations/cloud_gcp/DataprocSubmitter.scala

Lines changed: 1 addition & 1 deletion
@@ -189,7 +189,7 @@ object DataprocSubmitter {
     val bigtableInstanceId = sys.env.getOrElse("GCP_BIGTABLE_INSTANCE_ID", "")
 
     val gcpArgsToPass = Array.apply(
-      s"--is-gcp",
+      "--is-gcp",
       s"--gcp-project-id=${projectId}",
       s"--gcp-bigtable-instance-id=$bigtableInstanceId"
     )
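
This one-character Scala change is the second half of the commit message: s"--is-gcp" is an s-interpolated string with no $ placeholders, so it evaluates to exactly the same value as the plain literal "--is-gcp", just with a needless StringContext call, and linters commonly flag interpolators that interpolate nothing. The neighboring arguments keep the s prefix because they genuinely substitute ${projectId} and $bigtableInstanceId.
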
