From 3a4505ae87bfb9e8bdfa90cf4d21054554f03b4d Mon Sep 17 00:00:00 2001 From: Shafeeque E S Date: Mon, 9 Oct 2023 11:59:15 +0530 Subject: [PATCH] Do not duplicate the configMap --- .../charts/agent/templates/daemonset.yaml | 2 +- .../internal/cilium/charts/config/Chart.yaml | 6 + .../templates/configmap.yaml | 0 .../charts/operator/templates/configmap.yaml | 659 ------------------ .../charts/operator/templates/deployment.yaml | 2 +- charts/internal/cilium/values.yaml | 2 + go.mod | 2 +- pkg/charts/config.go | 1 + pkg/charts/utils.go | 9 +- pkg/charts/values.go | 46 +- pkg/controller/actuator_reconcile.go | 28 +- 11 files changed, 77 insertions(+), 680 deletions(-) create mode 100644 charts/internal/cilium/charts/config/Chart.yaml rename charts/internal/cilium/charts/{agent => config}/templates/configmap.yaml (100%) delete mode 100644 charts/internal/cilium/charts/operator/templates/configmap.yaml diff --git a/charts/internal/cilium/charts/agent/templates/daemonset.yaml b/charts/internal/cilium/charts/agent/templates/daemonset.yaml index 22d2bda41..1bcbfa7c6 100644 --- a/charts/internal/cilium/charts/agent/templates/daemonset.yaml +++ b/charts/internal/cilium/charts/agent/templates/daemonset.yaml @@ -16,7 +16,7 @@ spec: template: metadata: annotations: - checksum/configmap-cilium: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }} + checksum/configmap-cilium: "{{ .Values.global.configMapHash }}" {{- if and .Values.global.prometheus.enabled (not .Values.global.prometheus.serviceMonitor.enabled) }} prometheus.io/port: "{{ .Values.global.prometheus.port }}" prometheus.io/scrape: "true" diff --git a/charts/internal/cilium/charts/config/Chart.yaml b/charts/internal/cilium/charts/config/Chart.yaml new file mode 100644 index 000000000..fb7a63014 --- /dev/null +++ b/charts/internal/cilium/charts/config/Chart.yaml @@ -0,0 +1,6 @@ +apiVersion: v1 +name: config +version: 0.1.0 +description: Helm chart for Cilium configuration +sources: + - https://github.com/gardener/gardener-extension-networking-cilium diff --git a/charts/internal/cilium/charts/agent/templates/configmap.yaml b/charts/internal/cilium/charts/config/templates/configmap.yaml similarity index 100% rename from charts/internal/cilium/charts/agent/templates/configmap.yaml rename to charts/internal/cilium/charts/config/templates/configmap.yaml diff --git a/charts/internal/cilium/charts/operator/templates/configmap.yaml b/charts/internal/cilium/charts/operator/templates/configmap.yaml deleted file mode 100644 index d1ab22865..000000000 --- a/charts/internal/cilium/charts/operator/templates/configmap.yaml +++ /dev/null @@ -1,659 +0,0 @@ -{{- /* Default values with backwards compatibility */ -}} -{{- $defaultEnableCnpStatusUpdates := "true" -}} -{{- $defaultBpfMapDynamicSizeRatio := 0.0 -}} -{{- $defaultBpfClockProbe := "false" -}} -{{- $defaultSessionAffinity := "false" -}} -{{- $defaultOperatorApiServeAddr := "localhost:9234" -}} -{{- $defaultBpfCtTcpMax := 524288 -}} -{{- $defaultBpfCtAnyMax := 262144 -}} -{{- $enableIdentityMark := "true" -}} - -{{- /* Default values when 1.8 was initially deployed */ -}} -{{- if semverCompare ">=1.8" (default "1.8" .Values.upgradeCompatibility) -}} -{{- $defaultEnableCnpStatusUpdates = "false" -}} -{{- $defaultBpfMapDynamicSizeRatio = 0.0025 -}} -{{- $defaultBpfClockProbe := "false" -}} -{{- $defaultSessionAffinity = "true" -}} -{{- if .Values.global.ipv4.enabled }} -{{- $defaultOperatorApiServeAddr = "127.0.0.1:9234" -}} -{{- else -}} -{{- $defaultOperatorApiServeAddr = 
"[::1]:9234" -}} -{{- end }} -{{- $defaultBpfCtTcpMax = 0 -}} -{{- $defaultBpfCtAnyMax = 0 -}} -{{- end -}} - -{{- $bpfCtTcpMax := (coalesce .Values.global.bpf.ctTcpMax $defaultBpfCtTcpMax) -}} -{{- $bpfCtAnyMax := (coalesce .Values.global.bpf.ctAnyMax $defaultBpfCtAnyMax) -}} - -apiVersion: v1 -kind: ConfigMap -metadata: - name: cilium-config - namespace: {{ .Release.Namespace }} -data: - - # Identity allocation mode selects how identities are shared between cilium - # nodes by setting how they are stored. The options are "crd" or "kvstore". - # - "crd" stores identities in kubernetes as CRDs (custom resource definition). - # These can be queried with: - # kubectl get ciliumid - # - "kvstore" stores identities in an etcd kvstore, that is - # configured below. Cilium versions before 1.6 supported only the kvstore - # backend. Upgrades from these older cilium versions should continue using - # the kvstore by commenting out the identity-allocation-mode below, or - # setting it to "kvstore". - identity-allocation-mode: {{ .Values.global.identityAllocationMode }} -{{- if .Values.global.identityHeartbeatTimeout }} - identity-heartbeat-timeout: "{{ .Values.global.identityHeartbeatTimeout }}" -{{- end }} -{{- if .Values.global.identityGCInterval }} - identity-gc-interval: "{{ .Values.global.identityGCInterval }}" -{{- end }} -{{- if .Values.global.endpointGCInterval }} - cilium-endpoint-gc-interval: "{{ .Values.global.endpointGCInterval }}" -{{- end }} -{{- if .Values.global.nodesGCInterval }} - nodes-gc-interval: "5m0s" -{{- end }} - skip-cnp-status-startup-clean: {{ .Values.global.skipCnpStatusStartupClean | quote}} -{{- if .Values.global.disableEndpointCrd }} - disable-endpoint-crd: "{{ .Values.global.disableEndpointCrd }}" -{{- end }} - -{{- if .Values.identityChangeGracePeriod }} - # identity-change-grace-period is the grace period that needs to pass - # before an endpoint that has changed its identity will start using - # that new identity. During the grace period, the new identity has - # already been allocated and other nodes in the cluster have a chance - # to whitelist the new upcoming identity of the endpoint. - identity-change-grace-period: {{ default "5s" .Values.identityChangeGracePeriod | quote }} -{{- end }} - - # If you want to run cilium in debug mode change this value to true - debug: {{ .Values.global.debug.enabled | quote }} - # The agent can be put into the following three policy enforcement modes - # default, always and never. - # https://docs.cilium.io/en/latest/policy/intro/#policy-enforcement-modes - enable-policy: "{{ .Values.global.agent.policyMode }}" - -{{- if .Values.global.debug.verbose }} - debug-verbose: "{{ .Values.global.debug.verbose }}" -{{- end }} - -{{- if ne (int .Values.global.agent.healthPort) 9876 }} - # Set the TCP port for the agent health status API. This is not the port used - # for cilium-health. - agent-health-port: "{{ .Values.global.agent.healthPort }}" -{{- end }} - -{{- if .Values.global.prometheus.enabled }} - # If you want metrics enabled in all of your Cilium agents, set the port for - # which the Cilium agents will have their metrics exposed. - # This option deprecates the "prometheus-serve-addr" in the - # "cilium-metrics-config" ConfigMap - # NOTE that this will open the port on ALL nodes where Cilium pods are - # scheduled. 
- prometheus-serve-addr: ":{{ .Values.global.prometheus.port }}" -{{- end }} - -{{- if .Values.global.operatorPrometheus.enabled }} - # If you want metrics enabled in cilium-operator, set the port for - # which the Cilium Operator will have their metrics exposed. - # NOTE that this will open the port on the nodes where Cilium operator pod - # is scheduled. - operator-prometheus-serve-addr: ":{{ .Values.global.operatorPrometheus.port }}" - enable-metrics: "true" -{{- end }} - - # Enable IPv4 addressing. If enabled, all endpoints are allocated an IPv4 - # address. -{{- if .Values.global.ipv4 }} - enable-ipv4: {{ .Values.global.ipv4.enabled | quote }} -{{- end }} - - # Enable IPv6 addressing. If enabled, all endpoints are allocated an IPv6 - # address. -{{- if .Values.global.ipv6 }} - enable-ipv6: {{ .Values.global.ipv6.enabled | quote }} -{{- end }} - -{{- if .Values.global.cleanState }} - # If a serious issue occurs during Cilium startup, this - # invasive option may be set to true to remove all persistent - # state. Endpoints will not be restored using knowledge from a - # prior Cilium run, so they may receive new IP addresses upon - # restart. This also triggers clean-cilium-bpf-state. - clean-cilium-state: "true" -{{- end }} - -{{- if .Values.global.cleanBpfState }} - # If you want to clean cilium BPF state, set this to true; - # Removes all BPF maps from the filesystem. Upon restart, - # endpoints are restored with the same IP addresses, however - # any ongoing connections may be disrupted briefly. - # Loadbalancing decisions will be reset, so any ongoing - # connections via a service may be loadbalanced to a different - # backend after restart. - clean-cilium-bpf-state: "true" -{{- end }} - -{{- if .Values.global.cni.customConf }} - # Users who wish to specify their own custom CNI configuration file must set - # custom-cni-conf to "true", otherwise Cilium may overwrite the configuration. - custom-cni-conf: "{{ .Values.global.cni.customConf }}" -{{- end }} - -{{- if hasKey .Values "bpfClockProbe" }} - enable-bpf-clock-probe: {{ .Values.bpfClockProbe | quote }} -{{- else if eq $defaultBpfClockProbe "true" }} - enable-bpf-clock-probe: {{ $defaultBpfClockProbe | quote }} -{{- end }} - - # If you want cilium monitor to aggregate tracing for packets, set this level - # to "low", "medium", or "maximum". The higher the level, the less packets - # that will be seen in monitor output. - monitor-aggregation: {{ .Values.global.bpf.monitorAggregation }} - - # The monitor aggregation interval governs the typical time between monitor - # notification events for each allowed connection. - # - # Only effective when monitor aggregation is set to "medium" or higher. - monitor-aggregation-interval: {{ .Values.global.bpf.monitorInterval }} - - # The monitor aggregation flags determine which TCP flags which, upon the - # first observation, cause monitor notifications to be generated. - # - # Only effective when monitor aggregation is set to "medium" or higher. - monitor-aggregation-flags: {{ .Values.global.bpf.monitorFlags }} - -{{- if or $bpfCtTcpMax $bpfCtAnyMax }} - # bpf-ct-global-*-max specifies the maximum number of connections - # supported across all endpoints, split by protocol: tcp or other. One pair - # of maps uses these values for IPv4 connections, and another pair of maps - # use these values for IPv6 connections. - # - # If these values are modified, then during the next Cilium startup the - # tracking of ongoing connections may be disrupted. 
This may lead to brief - # policy drops or a change in loadbalancing decisions for a connection. - # - # For users upgrading from Cilium 1.2 or earlier, to minimize disruption - # during the upgrade process, set bpf-ct-global-tcp-max to 1000000. -{{- if $bpfCtTcpMax }} - bpf-ct-global-tcp-max: {{ $bpfCtTcpMax | quote }} -{{- end }} -{{- if $bpfCtAnyMax }} - bpf-ct-global-any-max: {{ $bpfCtAnyMax | quote }} -{{- end }} -{{- end }} - -{{- if .Values.global.bpf.natMax }} - # bpf-nat-global-max specified the maximum number of entries in the - # BPF NAT table. - bpf-nat-global-max: "{{ .Values.global.bpf.natMax }}" -{{- end }} - -{{- if .Values.global.bpf.neighMax }} - # bpf-neigh-global-max specified the maximum number of entries in the - # BPF neighbor table. - bpf-neigh-global-max: "{{ .Values.global.bpf.neighMax }}" -{{- end }} -{{- if hasKey .Values "bpfMapDynamicSizeRatio" }} - bpf-map-dynamic-size-ratio: {{ .Values.bpfMapDynamicSizeRatio | quote }} -{{- else if ne $defaultBpfMapDynamicSizeRatio 0.0 }} - # Specifies the ratio (0.0-1.0) of total system memory to use for dynamic - # sizing of the TCP CT, non-TCP CT, NAT and policy BPF maps. - bpf-map-dynamic-size-ratio: {{ $defaultBpfMapDynamicSizeRatio | quote }} -{{- end }} -{{- if .Values.global.bpf.policyMapMax }} - # bpf-policy-map-max specifies the maximum number of entries in endpoint - # policy map (per endpoint) - bpf-policy-map-max: "{{ .Values.global.bpf.policyMapMax }}" -{{- end }} -{{- if .Values.global.bpf.lbMapMax }} - # bpf-lb-map-max specifies the maximum number of entries in bpf lb service, - # backend and affinity maps. - bpf-lb-map-max: "{{ .Values.global.bpf.lbMapMax }}" -{{- end }} -{{- if .Values.global.bpf.lbMode }} - {{- if and (eq .Values.global.bpf.lbMode "dsr") (not .Values.global.ipv4NativeRoutingCIDR) }} - {{ fail "bpf-lb-mode 'dsr' requires native routing to be enabled" }} - {{- end }} - # Specifies the bpf load balancing mode ("snat", "dsr", "hybrid") - bpf-lb-mode: {{ .Values.global.bpf.lbMode }} -{{- end }} -{{- if .Values.global.bpf.lbExternalClusterip }} - # bpf-lb-bypass-fib-lookup instructs Cilium to enable the FIB lookup bypass - # optimization for nodeport reverse NAT handling. - bpf-lb-external-clusterip: "{{ .Values.global.bpf.lbExternalClusterip }}" -{{- end }} - - # Enable socket-based LB for E/W traffic - bpf-lb-sock: "{{ .Values.global.bpfSocketLB.enabled }}" - -{{- if .Values.global.bpfSocketLBHostnsOnly.enabled }} - # bpf-lb-sock-hostns-only skip socket LB for services when inside a pod namespace, in favor of service LB at the pod interface. - # Socket LB is still used when in the host namespace. Required by service mesh (e.g., Istio, Linkerd). - bpf-lb-sock-hostns-only: "{{ .Values.global.bpfSocketLBHostnsOnly.enabled }}" -{{- end }} - - # Pre-allocation of map entries allows per-packet latency to be reduced, at - # the expense of up-front memory allocation for the entries in the maps. The - # default value below will minimize memory usage in the default installation; - # users who are sensitive to latency may consider setting this to "true". - # - # This option was introduced in Cilium 1.4. Cilium 1.3 and earlier ignore - # this option and behave as though it is set to "true". - # - # If this value is modified, then during the next Cilium startup the restore - # of existing endpoints and tracking of ongoing connections may be disrupted. - # As a result, reply packets may be dropped and the load-balancing decisions - # for established connections may change. 
- # - # If this option is set to "false" during an upgrade from 1.3 or earlier to - # 1.4 or later, then it may cause one-time disruptions during the upgrade. - preallocate-bpf-maps: "{{ .Values.global.bpf.preallocateMaps }}" - - # Regular expression matching compatible Istio sidecar istio-proxy - # container image names - sidecar-istio-proxy-image: "{{ .Values.global.proxy.sidecarImageRegex }}" - - # Name of the cluster. Only relevant when building a mesh of clusters. - cluster-name: {{ .Values.global.cluster.name }} - -{{- if .Values.global.cluster.id }} - # Unique ID of the cluster. Must be unique across all conneted clusters and - # in the range of 1 and 255. Only relevant when building a mesh of clusters. - cluster-id: "{{ .Values.global.cluster.id }}" -{{- end }} - - # Encapsulation mode for communication between nodes - # Possible values: - # - disabled - # - vxlan (default) - # - geneve -{{- if eq .Values.global.tunnel "disabled" }} - routing-mode: "native" -{{- else if eq .Values.global.tunnel "vxlan" }} - routing-mode: "tunnel" - tunnel-protocol: "vxlan" -{{- else if eq .Values.global.tunnel "geneve" }} - routing-mode: "tunnel" - tunnel-protocol: "geneve" -{{- end }} - - -{{- if .Values.global.eni }} - enable-endpoint-routes: "true" - auto-create-cilium-node-resource: "true" - blacklist-conflicting-routes: "false" -{{- end }} - -{{- if .Values.global.azure.enabled }} - enable-endpoint-routes: "true" - auto-create-cilium-node-resource: "true" - blacklist-conflicting-routes: "false" - enable-local-node-route: "false" -{{- end }} - -{{- if .Values.global.flannel.enabled }} - # Interface to be used when running Cilium on top of a CNI plugin. - # For flannel, use "cni0" - flannel-master-device: {{ .Values.global.flannel.masterDevice }} - - # When running Cilium with policy enforcement enabled on top of a CNI plugin - # the BPF programs will be installed on the network interface specified in - # 'flannel-master-device' and on all network interfaces belonging to - # a container. When the Cilium DaemonSet is removed, the BPF programs will - # be kept in the interfaces unless this option is set to "true". - flannel-uninstall-on-exit: "{{ .Values.global.flannel.uninstallOnExit}}" - -{{- end }} - -{{- if .Values.global.l7Proxy }} - # Enables L7 proxy for L7 policy enforcement and visibility - enable-l7-proxy: {{ .Values.global.l7Proxy.enabled | quote }} -{{- end }} - - enable-ipv4-big-tcp: {{ .Values.global.enableIpv4BigTCP | quote }} - enable-ipv4-masquerade: {{ .Values.global.enableIpv4Masquerade | quote }} - enable-ipv6-big-tcp: {{ .Values.global.enableIpv6BigTCP | quote }} - enable-ipv6-masquerade: {{ .Values.global.enableIpv6Masquerade | quote }} -{{- if not .Values.global.snatToUpstreamDNS.enabled }} - enable-bpf-masquerade: {{ .Values.global.enableBPFMasquerade | quote }} -{{- end }} - - enable-xt-socket-fallback: {{ .Values.global.enableXTSocketFallback | quote }} - install-iptables-rules: {{ .Values.global.installIptablesRules | quote }} - install-no-conntrack-iptables-rules: {{ .Values.global.installNoConntrackIptablesRules | quote }} - - auto-direct-node-routes: {{ .Values.global.autoDirectNodeRoutes | quote }} - -{{- if .Values.global.localRedirectPolicy.enabled }} - enable-local-redirect-policy: {{ .Values.global.localRedirectPolicy.enabled | quote }} -{{- end }} - - # DNS Polling periodically issues a DNS lookup for each `matchName` from - # cilium-agent. The result is used to regenerate endpoint policy. 
- # DNS lookups are repeated with an interval of 5 seconds, and are made for - # A(IPv4) and AAAA(IPv6) addresses. Should a lookup fail, the most recent IP - # data is used instead. An IP change will trigger a regeneration of the Cilium - # policy for each endpoint and increment the per cilium-agent policy - # repository revision. - # - # This option is disabled by default starting from version 1.4.x in favor - # of a more powerful DNS proxy-based implementation, see [0] for details. - # Enable this option if you want to use FQDN policies but do not want to use - # the DNS proxy. - # - # To ease upgrade, users may opt to set this option to "true". - # Otherwise please refer to the Upgrade Guide [1] which explains how to - # prepare policy rules for upgrade. - # - # [0] http://docs.cilium.io/en/stable/policy/language/#dns-based - # [1] http://docs.cilium.io/en/stable/install/upgrade/#changes-that-may-require-action - tofqdns-enable-poller: "false" - -{{- if ne .Values.global.cni.chainingMode "none" }} - # Enable chaining with another CNI plugin - # - # Supported modes: - # - none - # - aws-cni - # - flannel - # - portmap (Enables HostPort support for Cilium) - cni-chaining-mode: {{ .Values.global.cni.chainingMode }} - -{{- if hasKey .Values "enableIdentityMark"}} - enable-identity-mark: {{ .Values.global.enableIdentityMark | quote }} -{{- else if (ne $enableIdentityMark "true") }} - enable-identity-mark: "false" -{{- end }} - -{{- if ne .Values.global.cni.chainingMode "portmap" }} - # Disable the PodCIDR route to the cilium_host interface as it is not - # required. While chaining, it is the responsibility of the underlying plugin - # to enable routing. - enable-local-node-route: "false" -{{- end }} -{{- end }} - -{{- if .Values.global.egressMasqueradeInterfaces }} - egress-masquerade-interfaces: {{ .Values.global.egressMasqueradeInterfaces }} -{{- end }} -{{- if and .Values.global.ipMasqAgent .Values.global.ipMasqAgent.enabled }} - enable-ip-masq-agent: "true" -{{- end }} - -{{- if .Values.global.encryption.enabled }} - enable-ipsec: {{ .Values.global.encryption.enabled | quote }} - ipsec-key-file: {{ .Values.global.encryption.mountPath }}/{{ .Values.global.encryption.keyFile }} -{{- if .Values.global.encryption.interface }} - encrypt-interface: {{ .Values.global.encryption.interface }} -{{- end }} -{{- if .Values.global.encryption.nodeEncryption }} - encrypt-node: {{ .Values.global.encryption.nodeEncryption | quote }} -{{- end }} -{{- end }} -{{- if .Values.global.datapathMode }} -{{- if eq .Values.global.datapathMode "ipvlan" }} - datapath-mode: ipvlan - ipvlan-master-device: {{ .Values.global.ipvlan.masterDevice }} -{{- end }} -{{- end }} -{{- if .Values.global.iptablesLockTimeout }} - iptables-lock-timeout: {{ .Values.global.iptablesLockTimeout | quote }} -{{- end }} -{{- if .Values.global.nativeRoutingCIDR }} - native-routing-cidr: {{ .Values.global.nativeRoutingCIDR }} -{{- end }} -{{- if .Values.global.ipv4NativeRoutingCIDR }} - ipv4-native-routing-cidr: {{ .Values.global.ipv4NativeRoutingCIDR }} -{{- end }} - -{{- if .Values.global.hostFirewall }} - enable-host-firewall: {{ .Values.global.hostFirewall | quote }} -{{- end}} - -{{- if .Values.global.mtu }} - mtu: {{ .Values.global.mtu | quote }} -{{- end}} - -{{- if .Values.global.devices }} - # List of devices used to attach bpf_host.o (implements BPF NodePort, - # host-firewall and BPF masquerading) - devices: {{ join " " .Values.global.devices | quote }} -{{- end }} - -{{- if .Values.global.egressGateway.enabled }} - {{- if ne 
.Values.global.kubeProxyReplacement "strict" }} - {{ fail "kubeProxyReplacement must be set to 'strict' in order to enable egressGateway" }} - {{- end}} - enable-ipv4-egress-gateway: "true" -{{- end}} -{{- if .Values.global.kubeProxyReplacement }} - kube-proxy-replacement: {{ .Values.global.kubeProxyReplacement | quote }} -{{- end }} -{{- if ne .Values.global.kubeProxyReplacement "disabled" }} - kube-proxy-replacement-healthz-bind-address: {{ .Values.global.kubeProxyReplacementHealthzBindAddr | quote}} -{{- end }} - -{{- if .Values.global.hostServices }} -{{- if .Values.global.hostServices.enabled }} - enable-host-reachable-services: {{ .Values.global.hostServices.enabled | quote }} -{{- end }} -{{- if ne .Values.global.hostServices.protocols "tcp,udp" }} - host-reachable-services-protos: {{ .Values.global.hostServices.protocols }} -{{- end }} -{{- end }} -{{- if .Values.global.hostPort }} -{{- if or (eq .Values.global.kubeProxyReplacement "partial") (eq .Values.global.kubeProxyReplacement "false") }} - enable-host-port: {{ .Values.global.hostPort.enabled | quote }} -{{- end }} -{{- end }} -{{- if .Values.global.externalIPs }} -{{- if or (eq .Values.global.kubeProxyReplacement "partial") (eq .Values.global.kubeProxyReplacement "false") }} - enable-external-ips: {{ .Values.global.externalIPs.enabled | quote }} -{{- end }} -{{- end }} -{{- if .Values.global.nodePort }} -{{- if or (eq .Values.global.kubeProxyReplacement "partial") (eq .Values.global.kubeProxyReplacement "false") }} - enable-node-port: {{ .Values.global.nodePort.enabled | quote }} -{{- end }} -{{- if .Values.global.nodePort.range }} - node-port-range: {{ .Values.global.nodePort.range | quote }} -{{- end }} -{{- if .Values.global.nodePort.device }} - device: {{ .Values.global.nodePort.device | quote }} -{{- end }} -{{- if .Values.global.nodePort.directRoutingDevice }} - direct-routing-device: {{ .Values.global.nodePort.directRoutingDevice | quote }} -{{- end }} -{{- if .Values.global.nodePort.mode }} - node-port-mode: {{ .Values.global.nodePort.mode | quote }} -{{- end }} -{{- if .Values.global.nodePort.acceleration }} - node-port-acceleration: {{ .Values.global.nodePort.acceleration | quote }} -{{- end }} - node-port-bind-protection: {{ .Values.global.nodePort.bindProtection | quote }} - enable-auto-protect-node-port-range: {{ .Values.global.nodePort.autoProtectPortRange | quote }} - enable-service-topology: {{ .Values.global.loadBalancer.serviceTopology | quote }} - enable-svc-source-range-check: {{ .Values.global.enableSvcSrcRangeCheck | quote }} -{{- end }} -{{- if hasKey .Values "sessionAffinity" }} - enable-session-affinity: {{ .Values.sessionAffinity | quote }} -{{- else if eq $defaultSessionAffinity "true" }} - enable-session-affinity: {{ $defaultSessionAffinity | quote }} -{{- end }} -{{- if .Values.global.l2NeighDiscovery.enabled }} - enable-l2-neigh-discovery: {{ .Values.global.l2NeighDiscovery.enabled | quote }} -{{- end }} -{{- if .Values.global.arpingRefreshPeriod }} - arping-refresh-period: {{ .Values.global.arpingRefreshPeriod }} -{{- end }} -{{- if .Values.global.cni.uninstall }} - cni-uninstall: {{ .Values.global.cni.uninstall | quote }} -{{- end }} -{{- if .Values.global.enableK8SNetworkpolicy }} - enable-k8s-networkpolicy: {{ .Values.global.enableK8SNetworkpolicy | quote }} -{{- end }} -{{- if and .Values.global.pprof .Values.global.pprof.enabled }} - pprof: {{ .Values.global.pprof.enabled | quote }} -{{- end }} -{{- if .Values.global.logSystemLoad }} - log-system-load: {{ .Values.global.logSystemLoad | 
quote }} -{{- end }} -{{- if .Values.global.logOptions }} - log-opt: {{ toYaml .Values.global.logOptions | nindent 4 }} -{{- end }} -{{- if and .Values.global.sockops .Values.global.sockops.enabled }} - sockops-enable: {{ .Values.global.sockops.enabled | quote }} -{{- end }} -{{- if and .Values.global.endpointRoutes .Values.global.endpointRoutes.enabled }} - enable-endpoint-routes: {{ .Values.global.endpointRoutes.enabled | quote }} -{{- end }} - write-cni-conf-when-ready: {{ .Values.global.cni.hostConfDirMountPath }}/05-cilium.conflist - cni-exclusive: {{ .Values.global.cni.exclusive | quote }} - cni-log-file: {{ .Values.global.cni.logFile }} -{{- if .Values.global.cni.readCniConf }} - read-cni-conf: {{ .Values.global.cni.readCniConf }} -{{- end }} - -{{- if .Values.global.kubeConfigPath }} - k8s-kubeconfig-path: {{ .Values.global.kubeConfigPath | quote }} -{{- end }} -{{- if and ( .Values.global.endpointHealthChecking.enabled ) (or (eq .Values.global.cni.chainingMode "portmap") (eq .Values.global.cni.chainingMode "none")) }} - enable-endpoint-health-checking: "true" -{{- else}} - # Disable health checking, when chaining mode is not set to portmap or none - enable-endpoint-health-checking: "false" -{{- end }} -{{- if hasKey .Values "healthChecking" }} - enable-health-checking: {{ .Values.healthChecking | quote }} -{{- end }} -{{- if .Values.global.wellKnownIdentities.enabled }} - enable-well-known-identities: "true" -{{- else }} - enable-well-known-identities: "false" -{{- end }} - enable-remote-node-identity: {{ .Values.global.remoteNodeIdentity | quote }} - enable-api-rate-limit: {{ .Values.global.apiRateLimit | quote }} -{{- if hasKey .Values "synchronizeK8sNodes" }} - synchronize-k8s-nodes: {{ .Values.synchronizeK8sNodes | quote }} -{{- end }} -{{- if .Values.policyAuditMode }} - policy-audit-mode: {{ .Values.policyAuditMode | quote }} -{{- end }} - -{{- if ne $defaultOperatorApiServeAddr "localhost:9234" }} - operator-api-serve-addr: {{ $defaultOperatorApiServeAddr | quote }} -{{- end }} - -{{- if .Values.global.hubble.enabled }} - # Enable Hubble gRPC service. - enable-hubble: {{ .Values.global.hubble.enabled | quote }} - # UNIX domain socket for Hubble server to listen to. - hubble-socket-path: {{ .Values.global.hubble.socketPath | quote }} -{{- if .Values.global.hubble.eventQueueSize }} - # Buffer size of the channel for Hubble to receive monitor events. If this field is not set, - # the buffer size is set to the default monitor queue size. - hubble-event-queue-size: {{ .Values.global.hubble.eventQueueSize | quote }} -{{- end }} -{{- if .Values.global.hubble.flowBufferSize }} - # Size of the buffer to store recent flows. - hubble-flow-buffer-size: {{ .Values.global.hubble.flowBufferSize | quote }} -{{- end }} -{{- if .Values.global.hubble.metrics.enabled }} - # Address to expose Hubble metrics (e.g. ":7070"). Metrics server will be disabled if this - # field is not set. - hubble-metrics-server: ":{{ .Values.global.hubble.metrics.port }}" - # A space separated list of metrics to enable. See [0] for available metrics. - # - # https://github.com/cilium/hubble/blob/master/Documentation/metrics.md - hubble-metrics: {{- range .Values.global.hubble.metrics.enabled }} - {{.}} - {{- end }} -{{- end }} - # An additional address for Hubble server to listen to (e.g. ":4244"). 
- hubble-listen-address: {{ .Values.global.hubble.listenAddress | quote }} -{{- end }} - hubble-disable-tls: {{ (not .Values.global.hubble.tls.enabled) | quote }} -{{- if .Values.global.hubble.tls.enabled }} - hubble-tls-auto-enabled: {{ .Values.global.hubble.tls.auto.enabled | quote }} - hubble-tls-cert-file: {{ .Values.global.hubble.tls.certFile | quote }} - hubble-tls-key-file: {{ .Values.global.hubble.tls.keyFile | quote }} - hubble-tls-client-ca-files: {{ .Values.global.hubble.tls.clientCAFiles | quote }} -{{- end }} -{{- if .Values.disableIptablesFeederRules }} - # A space separated list of iptables chains to disable when installing feeder rules. - disable-iptables-feeder-rules: {{ .Values.disableIptablesFeederRules | join " " | quote }} -{{- end }} - ipam: {{ .Values.global.ipam.mode }} -{{- if (eq .Values.global.ipam.mode "kubernetes" )}} - {{- if .Values.global.ipv4.enabled }} - k8s-require-ipv4-pod-cidr: "true" - {{- end }} - {{- if .Values.global.ipv6.enabled }} - k8s-require-ipv6-pod-cidr: "true" - {{- end }} -{{- end }} -{{- if (eq .Values.global.ipam.mode "cluster-pool") }} - ipam-cilium-node-update-rate: "15s" - {{- if .Values.global.ipv4.enabled }} - cluster-pool-ipv4-cidr: {{ .Values.global.podCIDR | quote }} - cluster-pool-ipv4-mask-size: {{ .Values.global.ipam.operator.clusterPoolIPv4MaskSize | quote }} - {{- end }} - {{- if .Values.global.ipv6.enabled }} - cluster-pool-ipv6-cidr: {{ .Values.global.ipam.operator.clusterPoolIPv6PodCIDR | quote }} - cluster-pool-ipv6-mask-size: {{ .Values.global.ipam.operator.clusterPoolIPv6MaskSize | quote }} - {{- end }} -{{- end }} -{{- if hasKey .Values "enableCnpStatusUpdates" }} - disable-cnp-status-updates: {{ (not .Values.enableCnpStatusUpdates) | quote }} -{{- else if (eq $defaultEnableCnpStatusUpdates "false") }} - disable-cnp-status-updates: "true" - cnp-node-status-gc-interval: "0s" -{{- end }} - - egress-gateway-reconciliation-trigger-interval: "1s" - enable-vtep: "false" - vtep-endpoint: "" - vtep-cidr: "" - vtep-mask: "" - vtep-mac: "" - enable-bgp-control-plane: "false" - procfs: "/host/proc" - bpf-root: "/sys/fs/bpf" - cgroup-root: "/run/cilium/cgroupv2" - enable-k8s-terminating-endpoint: "true" - enable-sctp: "false" - k8s-client-qps: "5" - k8s-client-burst: "10" - annotate-k8s-node: "true" - remove-cilium-node-taints: "true" - set-cilium-node-taints: "true" - set-cilium-is-up-condition: "true" - unmanaged-pod-watcher-interval: "15" - tofqdns-dns-reject-response-code: "refused" - tofqdns-enable-dns-compression: "true" - tofqdns-endpoint-max-ip-per-hostname: "50" - tofqdns-idle-connection-grace-period: "0s" - tofqdns-max-deferred-connection-deletes: "10000" - tofqdns-proxy-response-max-delay: "100ms" - agent-not-ready-taint-key: "node.cilium.io/agent-not-ready" - - mesh-auth-enabled: "true" - mesh-auth-queue-size: "1024" - mesh-auth-rotated-identities-queue-size: "1024" - mesh-auth-gc-interval: "5m0s" - - proxy-connect-timeout: "2" - proxy-max-requests-per-connection: "0" - proxy-max-connection-duration-seconds: "0" - - external-envoy-proxy: "false" - -{{- if hasKey .Values "blacklistConflictingRoutes" }} - # Configure blacklisting of local routes not owned by Cilium. 
- blacklist-conflicting-routes: {{ .Values.blacklistConflictingRoutes | quote }} -{{- end }} diff --git a/charts/internal/cilium/charts/operator/templates/deployment.yaml b/charts/internal/cilium/charts/operator/templates/deployment.yaml index 4a29b3d95..ba2917fb9 100644 --- a/charts/internal/cilium/charts/operator/templates/deployment.yaml +++ b/charts/internal/cilium/charts/operator/templates/deployment.yaml @@ -27,7 +27,7 @@ spec: template: metadata: annotations: - checksum/configmap-cilium: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }} + checksum/configmap-cilium: "{{ .Values.global.configMapHash }}" {{- if and .Values.global.prometheus.enabled (not .Values.global.prometheus.serviceMonitor.enabled) }} prometheus.io/port: {{ .Values.global.operatorPrometheus.port | quote }} prometheus.io/scrape: "true" diff --git a/charts/internal/cilium/values.yaml b/charts/internal/cilium/values.yaml index a0b261cde..3031724b3 100644 --- a/charts/internal/cilium/values.yaml +++ b/charts/internal/cilium/values.yaml @@ -13,6 +13,8 @@ requirements: # global groups all configuration options that have effect on all sub-charts global: + configMapHash: "" + egressGateway: enabled: false diff --git a/go.mod b/go.mod index 2d9c7c11f..e6bee6454 100644 --- a/go.mod +++ b/go.mod @@ -13,6 +13,7 @@ require ( github.com/spf13/pflag v1.0.5 go.uber.org/mock v0.2.0 golang.org/x/tools v0.12.0 + gopkg.in/yaml.v2 v2.4.0 k8s.io/api v0.28.2 k8s.io/apimachinery v0.28.2 k8s.io/client-go v0.28.2 @@ -107,7 +108,6 @@ require ( google.golang.org/genproto/googleapis/api v0.0.0-20230525234035-dd9d682886f9 // indirect google.golang.org/protobuf v1.30.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect - gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect istio.io/api v0.0.0-20230217221049-9d422bf48675 // indirect istio.io/client-go v1.17.1 // indirect diff --git a/pkg/charts/config.go b/pkg/charts/config.go index 9c60b94f0..957835293 100644 --- a/pkg/charts/config.go +++ b/pkg/charts/config.go @@ -46,6 +46,7 @@ type globalConfig struct { SnatToUpstreamDNS snatToUpstreamDNS `json:"snatToUpstreamDNS"` SnatOutOfCluster snatOutOfCluster `json:"snatOutOfCluster"` AutoDirectNodeRoutes bool `json:"autoDirectNodeRoutes"` + ConfigMapHash string `json:"configMapHash"` } // etcd related configuration for cilium diff --git a/pkg/charts/utils.go b/pkg/charts/utils.go index 706ecbf12..ff44e1bf8 100644 --- a/pkg/charts/utils.go +++ b/pkg/charts/utils.go @@ -120,6 +120,7 @@ var defaultGlobalConfig = globalConfig{ Enabled: false, }, AutoDirectNodeRoutes: false, + ConfigMapHash: "", } func newGlobalConfig() globalConfig { @@ -131,8 +132,8 @@ func newRequirementsConfig() requirementsConfig { } // ComputeCiliumChartValues computes the values for the cilium chart. 
-func ComputeCiliumChartValues(config *ciliumv1alpha1.NetworkConfig, network *extensionsv1alpha1.Network, cluster *extensionscontroller.Cluster, ipamMode string) (*ciliumConfig, error) { - requirementsConfig, globalConfig, err := generateChartValues(config, network, cluster, ipamMode) +func ComputeCiliumChartValues(config *ciliumv1alpha1.NetworkConfig, network *extensionsv1alpha1.Network, cluster *extensionscontroller.Cluster, ipamMode, configMapHash string) (*ciliumConfig, error) { + requirementsConfig, globalConfig, err := generateChartValues(config, network, cluster, ipamMode, configMapHash) if err != nil { return nil, fmt.Errorf("error when generating config values %w", err) } @@ -143,12 +144,14 @@ func ComputeCiliumChartValues(config *ciliumv1alpha1.NetworkConfig, network *ext }, nil } -func generateChartValues(config *ciliumv1alpha1.NetworkConfig, network *extensionsv1alpha1.Network, cluster *extensionscontroller.Cluster, ipamMode string) (requirementsConfig, globalConfig, error) { +func generateChartValues(config *ciliumv1alpha1.NetworkConfig, network *extensionsv1alpha1.Network, cluster *extensionscontroller.Cluster, ipamMode, configMapHash string) (requirementsConfig, globalConfig, error) { var ( requirementsConfig = newRequirementsConfig() globalConfig = newGlobalConfig() ) + globalConfig.ConfigMapHash = configMapHash + if network.Spec.PodCIDR != "" { globalConfig.PodCIDR = network.Spec.PodCIDR } diff --git a/pkg/charts/values.go b/pkg/charts/values.go index 8c2a5c7db..8b3d3218e 100644 --- a/pkg/charts/values.go +++ b/pkg/charts/values.go @@ -15,9 +15,14 @@ package charts import ( + "fmt" + extensionscontroller "github.com/gardener/gardener/extensions/pkg/controller" extensionsv1alpha1 "github.com/gardener/gardener/pkg/apis/extensions/v1alpha1" "github.com/gardener/gardener/pkg/chartrenderer" + "github.com/gardener/gardener/pkg/utils" + "gopkg.in/yaml.v2" + corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "github.com/gardener/gardener-extension-networking-cilium/charts" @@ -29,16 +34,51 @@ import ( const CiliumConfigKey = "config.yaml" // RenderCiliumChart renders the cilium chart with the given values. -func RenderCiliumChart(renderer chartrenderer.Interface, config *ciliumv1alpha1.NetworkConfig, network *extensionsv1alpha1.Network, cluster *extensionscontroller.Cluster, ipamMode string) ([]byte, error) { - values, err := ComputeCiliumChartValues(config, network, cluster, ipamMode) +func RenderCiliumChart(renderer chartrenderer.Interface, config *ciliumv1alpha1.NetworkConfig, network *extensionsv1alpha1.Network, cluster *extensionscontroller.Cluster, ipamMode, configMapHash string) ([]byte, error) { + var release *chartrenderer.RenderedChart + + values, err := ComputeCiliumChartValues(config, network, cluster, ipamMode, configMapHash) + if err != nil { + return nil, err + } + + release, err = renderer.RenderEmbeddedFS(charts.InternalChart, cilium.CiliumChartPath, cilium.ReleaseName, metav1.NamespaceSystem, values) if err != nil { return nil, err } - release, err := renderer.RenderEmbeddedFS(charts.InternalChart, cilium.CiliumChartPath, cilium.ReleaseName, metav1.NamespaceSystem, values) + newConfigMapHash, err := getConfigMapHash(release) if err != nil { return nil, err } + if newConfigMapHash != configMapHash { + // Render the charts with the new configMap hash. 
+ values, err := ComputeCiliumChartValues(config, network, cluster, ipamMode, newConfigMapHash) + if err != nil { + return nil, err + } + + release, err = renderer.RenderEmbeddedFS(charts.InternalChart, cilium.CiliumChartPath, cilium.ReleaseName, metav1.NamespaceSystem, values) + if err != nil { + return nil, err + } + } + return release.Manifest(), nil } + +func getConfigMapHash(release *chartrenderer.RenderedChart) (string, error) { + configMap := &corev1.ConfigMap{} + configMapPath := "cilium/charts/config/templates/configmap.yaml" + configMapFile, ok := release.Files()[configMapPath] + if !ok { + return "", fmt.Errorf("configmap not found in the given path: %s", configMapPath) + } + + if err := yaml.Unmarshal([]byte(configMapFile), &configMap); err != nil { + return "", fmt.Errorf("error unmarshalling configMap: %w, %s", err, configMapFile) + } + + return utils.ComputeConfigMapChecksum(configMap.Data), nil +} diff --git a/pkg/controller/actuator_reconcile.go b/pkg/controller/actuator_reconcile.go index 50e148f19..c0b55128c 100644 --- a/pkg/controller/actuator_reconcile.go +++ b/pkg/controller/actuator_reconcile.go @@ -25,6 +25,7 @@ import ( v1beta1helper "github.com/gardener/gardener/pkg/apis/core/v1beta1/helper" extensionsv1alpha1 "github.com/gardener/gardener/pkg/apis/extensions/v1alpha1" gardenerkubernetes "github.com/gardener/gardener/pkg/client/kubernetes" + "github.com/gardener/gardener/pkg/utils" "github.com/gardener/gardener/pkg/utils/chart" "github.com/gardener/gardener/pkg/utils/managedresources" "github.com/go-logr/logr" @@ -135,12 +136,12 @@ func (a *actuator) Reconcile(ctx context.Context, _ logr.Logger, network *extens return fmt.Errorf("could not create chart renderer for shoot '%s': %w", network.Namespace, err) } - ipamMode, err := getIPAMMode(ctx, a.client, cluster) + configMap, err := getCiliumConfigMap(ctx, a.client, cluster) if err != nil { - return err + return fmt.Errorf("error getting cilium configMap: %w", err) } - ciliumChart, err := chartspkg.RenderCiliumChart(chartRenderer, networkConfig, network, cluster, ipamMode) + ciliumChart, err := chartspkg.RenderCiliumChart(chartRenderer, networkConfig, network, cluster, getIPAMMode(configMap), getConfigMapHash(configMap)) if err != nil { return err } @@ -167,15 +168,18 @@ func getCiliumConfigMap(ctx context.Context, cl client.Client, cluster *extensio return configmap, nil } -func getIPAMMode(ctx context.Context, cl client.Client, cluster *extensionscontroller.Cluster) (string, error) { - configmap, err := getCiliumConfigMap(ctx, cl, cluster) - if err != nil { - return "", err - } - if configmap != nil { - if ipamMode, ok := configmap.Data["ipam"]; ok { - return ipamMode, nil +func getIPAMMode(configMap *corev1.ConfigMap) string { + if configMap != nil { + if ipamMode, ok := configMap.Data["ipam"]; ok { + return ipamMode } } - return "kubernetes", nil + return "kubernetes" +} + +func getConfigMapHash(configMap *corev1.ConfigMap) string { + if configMap != nil { + return utils.ComputeConfigMapChecksum(configMap.Data) + } + return "" }
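
Note on the rendering flow introduced in pkg/charts/values.go: with the ConfigMap
template now living only in the new config sub-chart, both the agent DaemonSet and
the operator Deployment take their checksum/configmap-cilium annotation from
.Values.global.configMapHash. RenderCiliumChart therefore renders the chart with
the previously known hash, hashes the freshly rendered cilium-config data, and
renders once more only if that hash changed, so the annotations always match the
final configuration. Below is a minimal, self-contained sketch of that fixed-point
pattern; hashConfigData and the render callback are illustrative stand-ins for
utils.ComputeConfigMapChecksum and the chartrenderer's RenderEmbeddedFS, not the
actual implementations:

    package main

    import (
    	"crypto/sha256"
    	"encoding/hex"
    	"fmt"
    	"sort"
    )

    // hashConfigData stands in for utils.ComputeConfigMapChecksum: a
    // deterministic checksum over the ConfigMap data. Sorting the keys keeps
    // the result independent of Go's randomized map iteration order.
    func hashConfigData(data map[string]string) string {
    	keys := make([]string, 0, len(data))
    	for k := range data {
    		keys = append(keys, k)
    	}
    	sort.Strings(keys)
    	h := sha256.New()
    	for _, k := range keys {
    		h.Write([]byte(k))
    		h.Write([]byte(data[k]))
    	}
    	return hex.EncodeToString(h.Sum(nil))
    }

    // renderWithStableHash mirrors the two-pass flow in RenderCiliumChart:
    // render with the old hash, hash the rendered config data, and re-render
    // only if the checksum annotation would otherwise be stale.
    func renderWithStableHash(oldHash string, render func(hash string) map[string]string) (map[string]string, string) {
    	data := render(oldHash)
    	newHash := hashConfigData(data)
    	if newHash != oldHash {
    		// One re-render is enough: the config data itself does not depend
    		// on the hash, only the pod-template annotations do.
    		data = render(newHash)
    	}
    	return data, newHash
    }

    func main() {
    	render := func(hash string) map[string]string {
    		return map[string]string{"ipam": "kubernetes", "enable-ipv4": "true"}
    	}
    	_, hash := renderWithStableHash("", render)
    	fmt.Println("checksum/configmap-cilium:", hash)
    }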
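
Relatedly, the go.mod change promotes gopkg.in/yaml.v2 from an indirect to a
direct dependency because getConfigMapHash has to unmarshal the rendered
configmap.yaml text before its data can be checksummed. A rough standalone
equivalent of that parsing step, assuming a trimmed-down struct in place of the
full corev1.ConfigMap used by the real code:

    package main

    import (
    	"fmt"

    	"gopkg.in/yaml.v2"
    )

    // configMapData models only the part of the rendered configmap.yaml that
    // the hash computation needs; the real code unmarshals into corev1.ConfigMap.
    type configMapData struct {
    	Data map[string]string `yaml:"data"`
    }

    func main() {
    	rendered := `
    apiVersion: v1
    kind: ConfigMap
    metadata:
      name: cilium-config
    data:
      ipam: kubernetes
      enable-ipv4: "true"
    `
    	var cm configMapData
    	if err := yaml.Unmarshal([]byte(rendered), &cm); err != nil {
    		panic(fmt.Errorf("error unmarshalling configMap: %w", err))
    	}
    	// cm.Data is what gets fed into the checksum computation.
    	fmt.Println(cm.Data["ipam"])
    }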
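
Design note on the actuator changes: Reconcile now reads the deployed
cilium-config ConfigMap from the shoot once and derives both inputs from it.
getIPAMMode falls back to "kubernetes" when the ConfigMap or its "ipam" key is
missing, and getConfigMapHash falls back to the empty string, matching the
configMapHash default in values.yaml. Passing the hash of the currently deployed
ConfigMap into RenderCiliumChart lets the renderer skip the second pass whenever
the configuration is unchanged.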