@@ -28,7 +28,11 @@ fab_logging_level = WARN
logging_config_class =

# Log format
- # we need to escape the curly braces by adding an additional curly brace
+ # Colour the logs when the controlling terminal is a TTY.
+ colored_console_log = True
+ colored_log_format = [%%(blue)s%%(asctime)s%%(reset)s] {{%%(blue)s%%(filename)s:%%(reset)s%%(lineno)d}} %%(log_color)s%%(levelname)s%%(reset)s - %%(log_color)s%%(message)s%%(reset)s
+ colored_formatter_class = airflow.utils.log.colored_log.CustomTTYColoredFormatter
+
log_format = [%%(asctime)s] {{%%(filename)s:%%(lineno)d}} %%(levelname)s - %%(message)s
simple_log_format = %%(asctime)s %%(levelname)s - %%(message)s

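For reference, the new colored_log_format uses colorlog-style placeholders such as %(log_color)s, %(blue)s and %(reset)s, which expand to ANSI escape codes; the doubled %% only escapes ConfigParser interpolation. A minimal sketch of the idea, assuming the third-party colorlog package (which the CustomTTYColoredFormatter named above builds on) rather than Airflow's own formatter:

    import logging

    from colorlog import ColoredFormatter  # third-party package, assumed installed

    handler = logging.StreamHandler()
    handler.setFormatter(ColoredFormatter(
        # Single-% version of the format above: level name and message are
        # wrapped in the colour assigned to the record's log level.
        "[%(asctime)s] %(log_color)s%(levelname)s%(reset)s - %(log_color)s%(message)s%(reset)s"
    ))

    logger = logging.getLogger("colored_example")
    logger.addHandler(handler)
    logger.setLevel(logging.INFO)
    logger.warning("shown in the level's colour when stderr is a TTY")
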
@@ -54,16 +58,26 @@ executor = SequentialExecutor
# their website
# sql_alchemy_conn = sqlite:////tmp/airflow.db

- # If SqlAlchemy should pool database connections.
- sql_alchemy_pool_enabled = True
-
# The encoding for the databases
sql_engine_encoding = utf-8

+ # If SqlAlchemy should pool database connections.
+ sql_alchemy_pool_enabled = True
+
# The SqlAlchemy pool size is the maximum number of database connections
# in the pool. 0 indicates no limit.
sql_alchemy_pool_size = 5

+ # The maximum overflow size of the pool.
+ # When the number of checked-out connections reaches the size set in pool_size,
+ # additional connections will be returned up to this limit.
+ # When those additional connections are returned to the pool, they are disconnected and discarded.
+ # It follows then that the total number of simultaneous connections the pool will allow is pool_size + max_overflow,
+ # and the total number of "sleeping" connections the pool will allow is pool_size.
+ # max_overflow can be set to -1 to indicate no overflow limit;
+ # no limit will be placed on the total number of concurrent connections. Defaults to 10.
+ sql_alchemy_max_overflow = 10
+
# The SqlAlchemy pool recycle is the number of seconds a connection
# can be idle in the pool before it is invalidated. This config does
# not apply to sqlite. If the number of DB connections is ever exceeded,
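The new pool comments track SQLAlchemy's QueuePool semantics. A minimal sketch of how these options map onto SQLAlchemy engine arguments (the connection string below is a placeholder, not something Airflow ships):

    from sqlalchemy import create_engine

    engine = create_engine(
        "postgresql+psycopg2://user:pass@localhost/airflow",  # placeholder DSN
        pool_size=5,        # sql_alchemy_pool_size: connections kept open in the pool
        max_overflow=10,    # sql_alchemy_max_overflow: extra burst connections above pool_size
        pool_recycle=1800,  # sql_alchemy_pool_recycle: seconds before an idle connection is replaced
    )
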
@@ -182,7 +196,7 @@ password =
[operators]
# The default owner assigned to each new operator, unless
# provided explicitly or passed via `default_args`
- default_owner = Airflow
+ default_owner = airflow
default_cpus = 1
default_ram = 512
default_disk = 512
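The lower-cased default_owner only applies when an operator is given no explicit owner; anything passed through default_args wins. A short sketch against the 1.10-era DAG API (dag id, owner and dates are illustrative):

    from datetime import datetime

    from airflow import DAG
    from airflow.operators.dummy_operator import DummyOperator

    dag = DAG(
        dag_id="owner_example",
        default_args={"owner": "data-eng", "start_date": datetime(2019, 1, 1)},
        schedule_interval=None,
    )

    # Inherits owner "data-eng" from default_args, not [operators] default_owner.
    noop = DummyOperator(task_id="noop", dag=dag)
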
@@ -191,9 +205,6 @@ default_gpus = 0
[hive]
# Default mapreduce queue for HiveOperator tasks
default_hive_mapred_queue =
- # Template for mapred_job_name in HiveOperator, supports the following named parameters:
- # hostname, dag_id, task_id, execution_date
- mapred_job_name_template = Airflow HiveOperator task for {hostname}.{dag_id}.{task_id}.{execution_date}

[webserver]
# The base url of your website as airflow cannot guess what domain or
@@ -301,6 +312,9 @@ cookie_secure = False
# Set samesite policy on session cookie
cookie_samesite =

+ # Default setting for wrap toggle on DAG code and TI log views.
+ default_wrap = False
+
[email]
email_backend = airflow.utils.email.send_email_smtp

@@ -391,6 +405,13 @@ ssl_key =
ssl_cert =
ssl_cacert =

+ # Celery Pool implementation.
+ # Choices include: prefork (default), eventlet, gevent or solo.
+ # See:
+ # https://docs.celeryproject.org/en/latest/userguide/workers.html#concurrency
+ # https://docs.celeryproject.org/en/latest/userguide/concurrency/eventlet.html
+ pool = prefork
+
[celery_broker_transport_options]
# This section is for specifying options which can be passed to the
# underlying celery broker transport. See:
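The new pool option selects which Celery worker pool implementation the Airflow worker runs with. Expressed directly against Celery's own worker_pool setting (illustrative only; the app name and broker URL are placeholders):

    from celery import Celery

    app = Celery("example", broker="redis://localhost:6379/0")  # placeholder broker
    # Equivalent of "pool = prefork" above; eventlet/gevent suit I/O-bound tasks,
    # solo runs everything in the worker's main process.
    app.conf.worker_pool = "prefork"
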
@@ -505,8 +526,8 @@ basedn = dc=example,dc=com
cacert = /etc/ca/ldap_ca.crt
search_scope = LEVEL

- # This setting allows the use of LDAP servers that either return a
- # broken schema, or do not return a schema.
+ # This setting allows the use of LDAP servers that either return a
+ # broken schema, or do not return a schema.
ignore_malformed_schema = False

[mesos]
@@ -567,10 +588,22 @@ api_rev = v3
hide_sensitive_variable_fields = True

[elasticsearch]
- elasticsearch_host =
- # we need to escape the curly braces by adding an additional curly brace
- elasticsearch_log_id_template = {dag_id}-{task_id}-{execution_date}-{try_number}
- elasticsearch_end_of_log_mark = end_of_log
+ # Elasticsearch host
+ host =
+ # Format of the log_id, which is used to query for a given tasks logs
+ log_id_template = {{dag_id}}-{{task_id}}-{{execution_date}}-{{try_number}}
+ # Used to mark the end of a log stream for a task
+ end_of_log_mark = end_of_log
+ # Qualified URL for an elasticsearch frontend (like Kibana) with a template argument for log_id
+ # Code will construct log_id using the log_id template from the argument above.
+ # NOTE: The code will prefix the https:// automatically, don't include that here.
+ frontend =
+ # Write the task logs to the stdout of the worker, rather than the default files
+ write_stdout = False
+ # Instead of the default log formatter, write the log lines as JSON
+ json_format = False
+ # Log fields to also attach to the json output, if enabled
+ json_fields = asctime, filename, lineno, levelname, message

[kubernetes]
# The repository, tag and imagePullPolicy of the Kubernetes Image for the Worker to Run
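As with log_format in the first hunk, the doubled braces in log_id_template only escape one brace-formatting pass (the reason given by the comment removed above), so the effective template is the single-brace form. A small sketch of how a log_id is rendered from it (all values are placeholders):

    template = "{dag_id}-{task_id}-{execution_date}-{try_number}"
    log_id = template.format(
        dag_id="example_dag",
        task_id="extract",
        execution_date="2019-07-01T00:00:00+00:00",
        try_number=1,
    )
    print(log_id)  # example_dag-extract-2019-07-01T00:00:00+00:00-1
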
@@ -606,7 +639,6 @@ logs_volume_subpath =
# A shared volume claim for the logs
logs_volume_claim =

-
# For DAGs mounted via a hostPath volume (mutually exclusive with volume claim and git-sync)
# Useful in local environment, discouraged in production
dags_volume_host =
@@ -634,7 +666,7 @@ git_password =
git_sync_root = /git
git_sync_dest = repo
# Mount point of the volume if git-sync is being used.
- # i.e. /root/airflow /dags
+ # i.e. {AIRFLOW_HOME} /dags
git_dags_folder_mount_point =

# To get Git-sync SSH authentication set up follow this format
@@ -705,6 +737,13 @@ affinity =
# https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.12/#toleration-v1-core
tolerations =

+ # **kwargs parameters to pass while calling a kubernetes client core_v1_api methods from Kubernetes Executor
+ # provided as a single line formatted JSON dictionary string.
+ # List of supported params in **kwargs are similar for all core_v1_apis, hence a single config variable for all apis
+ # See:
+ # https://raw.githubusercontent.com/kubernetes-client/python/master/kubernetes/client/apis/core_v1_api.py
+ kube_client_request_args =
+
# Worker pods security context options
# See:
# https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
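kube_client_request_args takes a one-line JSON dictionary whose keys are forwarded as **kwargs to the kubernetes client's core_v1_api calls. A hedged sketch of consuming such a value (the _request_timeout value is only an example, not a shipped default):

    import json

    # Hypothetical airflow.cfg value:
    # kube_client_request_args = {"_request_timeout": [60, 60]}
    raw = '{"_request_timeout": [60, 60]}'
    request_kwargs = json.loads(raw)

    # The decoded dict can then be splatted into a client call, e.g.:
    # core_v1_api.list_namespaced_pod(namespace="default", **request_kwargs)
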
@@ -753,3 +792,9 @@ fs_group =
#
# Additionally you may override worker airflow settings with the AIRFLOW__<SECTION>__<KEY>
# formatting as supported by airflow normally.
+
+ [kubernetes_labels]
+ # The Key-value pairs to be given to worker pods.
+ # The worker pods will be given these static labels, as well as some additional dynamic labels
+ # to identify the task.
+ # Should be supplied in the format: key = value
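Each key = value pair under the new [kubernetes_labels] section becomes a static label on the worker pods. A small sketch of how such a section reads out as a plain dict (the section contents below are illustrative):

    from configparser import ConfigParser

    config_text = (
        "[kubernetes_labels]\n"
        "environment = staging\n"
        "team = data-platform\n"
    )

    cfg = ConfigParser()
    cfg.read_string(config_text)

    labels = dict(cfg.items("kubernetes_labels"))
    print(labels)  # {'environment': 'staging', 'team': 'data-platform'}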