@@ -612,8 +612,8 @@ def _run_node(
         max_retries = node_instance.node_data.retry_config.max_retries
         retry_interval = node_instance.node_data.retry_config.retry_interval_seconds
         retries = 0
-        shoudl_continue_retry = True
-        while shoudl_continue_retry and retries <= max_retries:
+        should_continue_retry = True
+        while should_continue_retry and retries <= max_retries:
             try:
                 # run node
                 retry_start_at = datetime.now(UTC).replace(tzinfo=None)
@@ -692,7 +692,7 @@ def _run_node(
                                         parent_parallel_id=parent_parallel_id,
                                         parent_parallel_start_node_id=parent_parallel_start_node_id,
                                     )
-                                    shoudl_continue_retry = False
+                                    should_continue_retry = False
                                 else:
                                     yield NodeRunFailedEvent(
                                         error=route_node_state.failed_reason or "Unknown error.",
@@ -706,7 +706,7 @@ def _run_node(
                                         parent_parallel_id=parent_parallel_id,
                                         parent_parallel_start_node_id=parent_parallel_start_node_id,
                                     )
-                                    shoudl_continue_retry = False
+                                    should_continue_retry = False
                             elif run_result.status == WorkflowNodeExecutionStatus.SUCCEEDED:
                                 if node_instance.should_continue_on_error and self.graph.edge_mapping.get(
                                     node_instance.node_id
@@ -758,7 +758,7 @@ def _run_node(
                                     parent_parallel_id=parent_parallel_id,
                                     parent_parallel_start_node_id=parent_parallel_start_node_id,
                                 )
-                                shoudl_continue_retry = False
+                                should_continue_retry = False
 
                             break
                         elif isinstance(item, RunStreamChunkEvent):
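
Note: the renamed flag drives the node retry loop. The loop re-runs the node until it reaches a terminal outcome (success, or failure once the retry budget is spent), at which point the flag is cleared and the loop exits. Below is a minimal standalone sketch of that pattern, not the actual Dify _run_node implementation; run_once, MAX_RETRIES, and RETRY_INTERVAL are hypothetical stand-ins for node_instance.run() and the node's retry_config.

    import time

    MAX_RETRIES = 3        # stand-in for retry_config.max_retries
    RETRY_INTERVAL = 1.0   # stand-in for retry_config.retry_interval_seconds

    def run_with_retry(run_once):
        """Re-run `run_once` until it succeeds or the retry budget is spent."""
        retries = 0
        should_continue_retry = True
        result = None
        while should_continue_retry and retries <= MAX_RETRIES:
            result = run_once()
            if result["status"] == "failed" and retries < MAX_RETRIES:
                # transient failure with budget left: wait, then loop again
                retries += 1
                time.sleep(RETRY_INTERVAL)
                continue
            # terminal outcome (success, or failure on the final attempt):
            # clear the flag so the while loop exits
            should_continue_retry = False
        return result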