Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Draft: Allow to change group names used in conditions to be able to use with bigger inventories #105

Open
wants to merge 1 commit into
base: master
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
8 changes: 4 additions & 4 deletions all_install.yml
Original file line number Diff line number Diff line change
Expand Up @@ -59,7 +59,7 @@
tags:
- node
roles:
- { role: common, task: all, tags: [ 'common', 'install', 'common_install', 'node_install', 'node' ], when: "inventory_hostname not in groups['masters']" }
- { role: common, task: all, tags: [ 'common', 'install', 'common_install', 'node_install', 'node' ], when: "inventory_hostname not in groups[cluster_inventory_group.masters]" }

## master -> install common part (for all masters - and sometimes etcd when colocated with masters)
- hosts: masters
Expand All @@ -84,10 +84,10 @@
- role: keepalived
tags: [ 'master', 'install', 'master_install', 'ha', 'keepalived']
when:
- ( groups['masters'] | length ) > 1
- ( groups[cluster_inventory_group.masters] | length ) > 1
- ( custom.networking.masterha_type | default('vip') ) == 'vip'

- hosts: primary-master
- hosts: primary-master
name: primary-master (or master in general) - it applies to both ha and non-ha
become: yes
become_method: sudo
Expand Down Expand Up @@ -122,7 +122,7 @@
- install
- node_install
roles:
- { role: non-primary-master, tags: [ 'node', 'install', 'node_install'], when: "inventory_hostname not in groups['masters']" }
- { role: non-primary-master, tags: [ 'node', 'install', 'node_install'], when: "inventory_hostname not in groups[cluster_inventory_group.masters]" }

## node -> label nodes (even when master is also a node)
- hosts: nodes
Expand Down
4 changes: 2 additions & 2 deletions all_reset.yml
Original file line number Diff line number Diff line change
Expand Up @@ -73,8 +73,8 @@
tags:
- node
roles:
- { role: tools, task: reset, tags: [ 'reset', 'node_reset' ], when: "inventory_hostname not in groups['masters']" }
- { role: tools, task: weave_reset, tags: [ 'reset', 'node_reset', 'network_reset', 'weave_reset', 'weave' ], when: "inventory_hostname not in groups['masters']" }
- { role: tools, task: reset, tags: [ 'reset', 'node_reset' ], when: "inventory_hostname not in groups[cluster_inventory_group.masters]" }
- { role: tools, task: weave_reset, tags: [ 'reset', 'node_reset', 'network_reset', 'weave_reset', 'weave' ], when: "inventory_hostname not in groups[cluster_inventory_group.masters]" }

- hosts: masters
become: yes
Expand Down
6 changes: 3 additions & 3 deletions group_vars/all/addons.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -106,19 +106,19 @@ helm:
## DASHBOARD ###
################
## This (v1) will be deprecated in favour of 2.0 - soon to be released
# - { name: dashboard, repo: stable/kubernetes-dashboard, namespace: kube-system, options: '--set image.repository={{ images_repo | default ("registry.k8s.io") }}/kubernetes-dashboard-{{ HOST_ARCH }} --set rbac.create=True,ingress.enabled=True,ingress.hosts[0]=dashboard.{{ custom.networking.dnsDomain }},ingress.hosts[1]={{ custom.networking.masterha_fqdn | default (groups["primary-master"][0]) }},ingress.hosts[2]={{ groups["primary-master"][0] }} --set nodeSelector."node\-role\.kubernetes\.io/infra=" --set tolerations[0].effect=NoSchedule,tolerations[0].key="node-role.kubernetes.io/infra" --set tolerations[1].effect=PreferNoSchedule,tolerations[1].key="node-role.kubernetes.io/infra" --set rbac.create=True,rbac.clusterAdminRole=True --set enableInsecureLogin=True --set enableSkipLogin=True ' }
# - { name: dashboard, repo: stable/kubernetes-dashboard, namespace: kube-system, options: '--set image.repository={{ images_repo | default ("registry.k8s.io") }}/kubernetes-dashboard-{{ HOST_ARCH }} --set rbac.create=True,ingress.enabled=True,ingress.hosts[0]=dashboard.{{ custom.networking.dnsDomain }},ingress.hosts[1]={{ custom.networking.masterha_fqdn | default (groups[cluster_inventory_group.primary_master][0]) }},ingress.hosts[2]={{ groups[cluster_inventory_group.primary_master][0] }} --set nodeSelector."node\-role\.kubernetes\.io/infra=" --set tolerations[0].effect=NoSchedule,tolerations[0].key="node-role.kubernetes.io/infra" --set tolerations[1].effect=PreferNoSchedule,tolerations[1].key="node-role.kubernetes.io/infra" --set rbac.create=True,rbac.clusterAdminRole=True --set enableInsecureLogin=True --set enableSkipLogin=True ' }
# For a learning/development --set rbac.clusterAdminRole=True with skip login and insecure might be acceptable, but not for real case scenarios!!!
# For in between, one can keep: rbac.clusterReadOnlyRole=True (if bug https://github.com/helm/charts/issues/15118 was solved)
# For a production, remove --set enableInsecureLogin=True --set enableSkipLogin=True --set rbac.clusterAdminRole=True

# Option 2:
# use the below if you are sure you don't need any auth to your dashboard, and you use k8s 1.15 or older.
# - { name: dashboard, repo: stable/kubernetes-dashboard, options: '--set rbac.create=True,ingress.enabled=True,ingress.hosts[0]={{groups["primary-master"][0]}},ingress.hosts[1]=dashboard.{{ custom.networking.dnsDomain }},image.tag=v1.8.3 --version=0.5.3' }
# - { name: dashboard, repo: stable/kubernetes-dashboard, options: '--set rbac.create=True,ingress.enabled=True,ingress.hosts[0]={{groups[cluster_inventory_group.primary_master][0]}},ingress.hosts[1]=dashboard.{{ custom.networking.dnsDomain }},image.tag=v1.8.3 --version=0.5.3' }

####################
## DASHBOARD 2.0 ###
####################
- { name: dashboard, repo: kubernetes-dashboard/kubernetes-dashboard, namespace: monitoring, options: '--set image.repository={{ images_repo | default ("docker.io") }}/kubernetesui/dashboard --set ingress.enabled=True,ingress.hosts[0]=dashboard.{{ custom.networking.dnsDomain }},ingress.hosts[1]={{ custom.networking.masterha_fqdn | default (groups["primary-master"][0]) }},ingress.hosts[2]={{ groups["primary-master"][0] }} --set nodeSelector."node\-role\.kubernetes\.io/infra=" --set tolerations[0].effect=NoSchedule,tolerations[0].key="node-role.kubernetes.io/infra" --set tolerations[1].effect=PreferNoSchedule,tolerations[1].key="node-role.kubernetes.io/infra" --set metricsScraper.enabled=true,metricsScraper.image.repository={{ images_repo | default ("docker.io") }}/kubernetesui/dashboard-metrics-scraper --set rbac.create=True,rbac.clusterReadOnlyRole=True --set protocolHttp=true --set kong.image.repository={{ images_repo | default ("docker.io") }}/kong --set kong.admin.tls.enabled=false --set api.image.repository={{ images_repo | default ("docker.io") }}/kubernetesui/dashboard-api --set web.image.repository={{ images_repo | default ("docker.io") }}/kubernetesui/dashboard-web --set auth.image.repository={{ images_repo | default ("docker.io") }}/kubernetesui/dashboard-auth --set api.scaling.replicas=1 --set app.ingress.enabled=True,app.ingress.hosts[0]=dashboard.{{ custom.networking.dnsDomain }},app.ingress.hosts[1]={{ custom.networking.masterha_fqdn | default (groups["primary-master"][0]) }},app.ingress.hosts[2]={{ groups["primary-master"][0] }} --set app.scheduling.nodeSelector."node\-role\.kubernetes\.io/infra=" --set app.tolerations[0].effect=NoSchedule,app.tolerations[0].key="node-role.kubernetes.io/infra" --set app.tolerations[1].effect=PreferNoSchedule,app.tolerations[1].key="node-role.kubernetes.io/infra" ' }
- { name: dashboard, repo: kubernetes-dashboard/kubernetes-dashboard, namespace: monitoring, options: '--set image.repository={{ images_repo | default ("docker.io") }}/kubernetesui/dashboard --set ingress.enabled=True,ingress.hosts[0]=dashboard.{{ custom.networking.dnsDomain }},ingress.hosts[1]={{ custom.networking.masterha_fqdn | default (groups[cluster_inventory_group.primary_master][0]) }},ingress.hosts[2]={{ groups[cluster_inventory_group.primary_master][0] }} --set nodeSelector."node\-role\.kubernetes\.io/infra=" --set tolerations[0].effect=NoSchedule,tolerations[0].key="node-role.kubernetes.io/infra" --set tolerations[1].effect=PreferNoSchedule,tolerations[1].key="node-role.kubernetes.io/infra" --set metricsScraper.enabled=true,metricsScraper.image.repository={{ images_repo | default ("docker.io") }}/kubernetesui/dashboard-metrics-scraper --set rbac.create=True,rbac.clusterReadOnlyRole=True --set protocolHttp=true --set kong.image.repository={{ images_repo | default ("docker.io") }}/kong --set kong.admin.tls.enabled=false --set api.image.repository={{ images_repo | default ("docker.io") }}/kubernetesui/dashboard-api --set web.image.repository={{ images_repo | default ("docker.io") }}/kubernetesui/dashboard-web --set auth.image.repository={{ images_repo | default ("docker.io") }}/kubernetesui/dashboard-auth --set api.scaling.replicas=1 --set app.ingress.enabled=True,app.ingress.hosts[0]=dashboard.{{ custom.networking.dnsDomain }},app.ingress.hosts[1]={{ custom.networking.masterha_fqdn | default (groups[cluster_inventory_group.primary_master][0]) }},app.ingress.hosts[2]={{ groups[cluster_inventory_group.primary_master][0] }} --set app.scheduling.nodeSelector."node\-role\.kubernetes\.io/infra=" --set app.tolerations[0].effect=NoSchedule,app.tolerations[0].key="node-role.kubernetes.io/infra" --set app.tolerations[1].effect=PreferNoSchedule,app.tolerations[1].key="node-role.kubernetes.io/infra" ' }


#metricsScraper.image.repository={{ images_repo | default ("docker.io") }}/kubernetesui/metrics-scraper --set rbac.create=True,rbac.clusterReadOnlyRole=True --set protocolHttp=true' } # --version 4.0.0' } # https://github.com/kubernetes/dashboard/blob/master/aio/deploy/helm-chart/kubernetes-dashboard/Chart.yaml#L17
Expand Down
8 changes: 8 additions & 0 deletions group_vars/all/global.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -5,6 +5,14 @@
# ansible_python_interpreter=/usr/bin/python3
## (the variable can also be defined per host if there is a need for mix)

## Defines mappings for the original groups that were used in various conditions. Allows using the playbook with an inventory file that also contains other hosts/groups, or even multiple clusters.
cluster_inventory_group:
all: 'all'
masters: 'masters'
nodes: 'nodes'
primary_master: 'primary-master'
secondary_masters: 'secondary-masters'

#####
## PROXY
## proxy environment variable, mainly for fetching addons
Expand Down
4 changes: 2 additions & 2 deletions only_nodes_only_install.yml
Original file line number Diff line number Diff line change
Expand Up @@ -58,7 +58,7 @@
tags:
- node
roles:
- { role: common, task: all, tags: [ 'common', 'install', 'common_install', 'node_install', 'node' ], when: "inventory_hostname not in groups['masters']" }
- { role: common, task: all, tags: [ 'common', 'install', 'common_install', 'node_install', 'node' ], when: "inventory_hostname not in groups[cluster_inventory_group.masters]" }

## node -> install nodes (kubeadm join, etc)
- hosts: nodes
Expand All @@ -70,7 +70,7 @@
- install
- node_install
roles:
- { role: non-primary-master, tags: [ 'node', 'install', 'node_install'], when: "inventory_hostname not in groups['masters']" }
- { role: non-primary-master, tags: [ 'node', 'install', 'node_install'], when: "inventory_hostname not in groups[cluster_inventory_group.masters]" }

## node -> label nodes (even when master is also a node)
- hosts: nodes
Expand Down
2 changes: 1 addition & 1 deletion only_secondaryMasters_only_install.yml
Original file line number Diff line number Diff line change
Expand Up @@ -78,7 +78,7 @@
- role: keepalived
tags: [ 'master', 'install', 'master_install', 'ha', 'keepalived']
when:
- ( groups['masters'] | length ) > 1
- ( groups[cluster_inventory_group.masters] | length ) > 1
- ( custom.networking.masterha_type | default('vip') ) == 'vip'

- hosts: secondary-masters
Expand Down
14 changes: 7 additions & 7 deletions roles/common/tasks/decide_master_name.yml
Original file line number Diff line number Diff line change
Expand Up @@ -4,29 +4,29 @@
# https://github.com/ReSearchITEng/kubeadm-playbook/issues/81 ( https://github.com/ansible/ansible/issues/38777 )
- block:
- name: by default set master name to inventory definition (no MasterHA case)
set_fact: master_name={{ groups['primary-master'][0] }}
set_fact: master_name={{ groups[cluster_inventory_group.primary_master][0] }}
when:
- groups['masters'] | length == 1
- groups[cluster_inventory_group.masters] | length == 1

- name: force use fqdn for master name (no MasterHA case) if inventory was not defined fqdn and we have to discover...
set_fact: master_name={{ hostvars[groups['primary-master'][0]]['ansible_fqdn'] }}
set_fact: master_name={{ hostvars[groups[cluster_inventory_group.primary_master][0]]['ansible_fqdn'] }}
when:
- custom.networking.fqdn.always or custom.networking.fqdn.master
- groups['masters'] | length == 1
- '"." not in groups["primary-master"][0]' # meaning it was not defined with fqdn, but we would like to force fqdn (per above custom.networking.fqdn condition)
- groups[cluster_inventory_group.masters] | length == 1
- '"." not in groups[cluster_inventory_group.primary_master][0]' # meaning it was not defined with fqdn, but we would like to force fqdn (per above custom.networking.fqdn condition)

- name: force use fqdn for master name (MasterHA case)
set_fact: master_name={{ custom.networking.masterha_fqdn }}
when:
- custom.networking.fqdn.always or custom.networking.fqdn.master
- groups['masters'] | length > 1
- groups[cluster_inventory_group.masters] | length > 1

- name: force use ip for master name (MasterHA case)
set_fact: master_name={{ custom.networking.masterha_ip }}
when:
- not custom.networking.fqdn.always
- not custom.networking.fqdn.master
- groups['masters'] | length > 1
- groups[cluster_inventory_group.masters] | length > 1

tags:
- always
2 changes: 1 addition & 1 deletion roles/common/tasks/kube_config.yml
Original file line number Diff line number Diff line change
Expand Up @@ -21,7 +21,7 @@
when:
- ClusterConfiguration is defined
- ClusterConfiguration.cloudProvider is defined
- inventory_hostname in groups['masters']
- inventory_hostname in groups[cluster_inventory_group.masters]
tags:
- kubelet

Expand Down
12 changes: 6 additions & 6 deletions roles/non-primary-master/tasks/main.yml
Original file line number Diff line number Diff line change
Expand Up @@ -62,7 +62,7 @@
# environment -> is required due to a k8s bug which makes kubeadm need internet to generate a token. setting version is not allowed
# Optionally using "--config /etc/kubernetes/kubeadm-master.conf" to get rid of the message that it tries to connect to internet for version
register: kubeadm_token_whash_n
delegate_to: "{{groups['primary-master'][0]}}"
delegate_to: "{{groups[cluster_inventory_group.primary_master][0]}}"
run_once: yes
when:
- InitConfiguration is not defined or InitConfiguration.bootstrapTokens is not defined or InitConfiguration.bootstrapTokens[0].token is not defined
Expand Down Expand Up @@ -135,7 +135,7 @@
environment: '{{env_kc}}'
shell: "/usr/bin/kubeadm init phase upload-certs --upload-certs -- 2>/dev/null | tail -1 "
register: kubeadm_upload_certificate_key
delegate_to: "{{groups['primary-master'][0]}}"
delegate_to: "{{groups[cluster_inventory_group.primary_master][0]}}"
run_once: yes

## TODO: try to remove this, and keep the autodetermined addr:
Expand All @@ -148,7 +148,7 @@
JoinConfiguration: "{{ JoinConfiguration | combine ( { 'controlPlane': { 'certificateKey': kubeadm_upload_certificate_key.stdout_lines[0] } }, recursive=True) }}"

when:
- inventory_hostname in groups['secondary-masters']
- inventory_hostname in groups[cluster_inventory_group.secondary_masters]

### Cloud Config
- name: JoinConfiguration - cloudProvider merging {{ ClusterConfiguration.cloudProvider }} to the JoinConfiguration.nodeRegistration.kubeletExtraArgs
Expand Down Expand Up @@ -240,7 +240,7 @@
# changed_when: false

# - name: Check all nodes were registered
# shell: "/usr/bin/test $(kubectl get nodes --no-headers | grep -ow Ready | wc -l) >= {{ groups['nodes'] | length + groups['masters'] | length }}"
# shell: "/usr/bin/test $(kubectl get nodes --no-headers | grep -ow Ready | wc -l) >= {{ groups[cluster_inventory_group.nodes] | length + groups[cluster_inventory_group.masters] | length }}"
# register: command_result
# retries: 10
# delay: 3
Expand All @@ -263,7 +263,7 @@
regexp: '^export KUBECONFIG=.*'
when:
- shell is undefined or shell == 'bash'
- inventory_hostname in groups['nodes']
- inventory_hostname in groups[cluster_inventory_group.nodes]

- name: export KUBECONFIG in secondary-masters' ~/.bashrc
lineinfile:
Expand All @@ -274,4 +274,4 @@
regexp: '^export KUBECONFIG=.*'
when:
- shell is undefined or shell == 'bash'
- inventory_hostname in groups['secondary-masters']
- inventory_hostname in groups[cluster_inventory_group.secondary_masters]
4 changes: 2 additions & 2 deletions roles/post_deploy/tasks/taints.yml
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,7 @@
shell: 'kubectl taint nodes --selector {{ item.label }} {{ item.label }}:{{ item.taint }} --overwrite'
with_items: "{{ taint_for_label }}"
when:
- groups['all'] | length > 1
- groups[cluster_inventory_group.all] | length > 1
tags:
- taint

Expand All @@ -29,7 +29,7 @@
tags:
- taints
when:
- groups['all'] | length == 1
- groups[cluster_inventory_group.all] | length == 1
tags:
- taints

Loading