# default.ini (default configuration for databricks-cicd, forked from man40/databricks-cicd)
[global]
# local_path is the path to the folder that contains all resources to deploy, relative to the working directory.
local_path: .
# Maximum number of objects that may be altered in a single deploy. It prevents an accidental misconfiguration from wiping the whole server.
# Any modifications to the target Databricks environment are final; there is no rollback mechanism in place.
# If the safety limit is reached for a valid reason, simply rerun the pipeline and it will continue where it left off.
deploy_safety_limit: 10
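# For example, with the default limit of 10 a deploy stops once 10 objects have been altered; rerunning the pipeline picks up the remaining objects.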
# If the Databricks API rate limit is reached, the deploy process waits before attempting again; 0 means abort. The timeout is in seconds.
rate_limit_attempts: 5
rate_limit_timeout: 10
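# Assuming attempts are spaced by the timeout, the defaults above (5 attempts, 10 seconds) retry a rate-limited call for roughly 50 seconds before the deploy fails.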
[workspace]
deploy: True
local_sub_dir: workspace
[instance_pools]
deploy: True
local_sub_dir: instance_pools
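# Attributes listed under ignore_attributes are skipped when comparing local definitions with the target environment; presumably these are values set by the server rather than in the source files.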
ignore_attributes:
default_tags
instance_pool_id
state
stats
status
[clusters]
deploy: True
local_sub_dir: clusters
ignore_attributes:
cluster_id
cluster_cores
cluster_memory_mb
cluster_source
creator_user_name
default_tags
disk_spec
driver
driver_instance_pool_id
driver_instance_source
effective_spark_version
enable_local_disk_encryption
executors
init_scripts_safe_mode
instance_source
instance_pool_name
jdbc_port
last_activity_time
last_restarted_time
last_state_loss_time
spark_context_id
start_time
state
state_message
terminated_time
termination_reason
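# The attributes below are additionally ignored when a cluster uses an instance pool; presumably node type and disk settings are then determined by the pool.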
ignore_attributes_with_instance_pool:
azure_attributes
driver_node_type_id
enable_elastic_disk
node_type_id
[jobs]
deploy: True
local_sub_dir: jobs
# Attributes listed here are removed from the job source files before they are deployed.
strip_attributes:
# email_notifications
# schedule
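# For example, uncommenting schedule above would deploy the jobs without their schedule definitions.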
[dbfs]
deploy: True
local_sub_dir: dbfs
compare_contents: False
# How many bytes to send in a single API call while transferring a file.
transfer_block_size: 512 * 1024
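# 512 * 1024 = 524288 bytes (512 KiB) per call.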
[validate]
workspace_python_notebook_header: True
clusters_no_nested_folders: True
clusters_name: True
jobs_no_nested_folders: True
jobs_name: True
jobs_existing_clusters: True
jobs_notebook_paths: True