diff --git a/kubernetes-gcp-go/Pulumi.yaml b/kubernetes-gcp-go/Pulumi.yaml
new file mode 100644
index 000000000..f796d7e1a
--- /dev/null
+++ b/kubernetes-gcp-go/Pulumi.yaml
@@ -0,0 +1,14 @@
+name: ${PROJECT}
+description: ${DESCRIPTION}
+runtime: go
+template:
+  description: A Go program to create a Kubernetes cluster on Google Cloud
+  config:
+    gcp:project:
+      description: The Google Cloud project to deploy into
+    gcp:region:
+      default: us-central1
+      description: The Google Cloud region to deploy into
+    nodesPerZone:
+      default: 1
+      description: The desired number of nodes PER ZONE in the nodepool
diff --git a/kubernetes-gcp-go/go.mod b/kubernetes-gcp-go/go.mod
new file mode 100644
index 000000000..2d518e06d
--- /dev/null
+++ b/kubernetes-gcp-go/go.mod
@@ -0,0 +1,8 @@
+module tmp
+
+go 1.17
+
+require (
+    github.com/pulumi/pulumi/sdk/v3 v3.30.0
+    github.com/pulumi/pulumi-gcp/sdk/v6 v6.39.0
+)
\ No newline at end of file
diff --git a/kubernetes-gcp-go/main.go b/kubernetes-gcp-go/main.go
new file mode 100644
index 000000000..188da0734
--- /dev/null
+++ b/kubernetes-gcp-go/main.go
@@ -0,0 +1,152 @@
+package main
+
+import (
+    "fmt"
+
+    "github.com/pulumi/pulumi-gcp/sdk/v6/go/gcp/compute"
+    "github.com/pulumi/pulumi-gcp/sdk/v6/go/gcp/container"
+    "github.com/pulumi/pulumi-gcp/sdk/v6/go/gcp/serviceaccount"
+    "github.com/pulumi/pulumi/sdk/v3/go/pulumi"
+    "github.com/pulumi/pulumi/sdk/v3/go/pulumi/config"
+)
+
+func main() {
+    pulumi.Run(func(ctx *pulumi.Context) error {
+        // Get some provider-namespaced configuration values
+        providerCfg := config.New(ctx, "gcp")
+        gcpProject := providerCfg.Require("project")
+        gcpRegion, err := providerCfg.Try("region")
+        if err != nil {
+            gcpRegion = "us-central1"
+        }
+        // Get some additional configuration values or use defaults
+        cfg := config.New(ctx, "")
+        nodesPerZone, err := cfg.TryInt("nodesPerZone")
+        if err != nil {
+            nodesPerZone = 1
+        }
+
+        // Create a new network
+        gkeNetwork, err := compute.NewNetwork(ctx, "gke-network", &compute.NetworkArgs{
+            AutoCreateSubnetworks: pulumi.Bool(false),
+            Description:           pulumi.String("A virtual network for your GKE cluster(s)"),
+        })
+        if err != nil {
+            return err
+        }
+
+        // Create a subnet in the network
+        gkeSubnet, err := compute.NewSubnetwork(ctx, "gke-subnet", &compute.SubnetworkArgs{
+            IpCidrRange:           pulumi.String("10.128.0.0/12"),
+            Network:               gkeNetwork.ID(),
+            PrivateIpGoogleAccess: pulumi.Bool(true),
+        })
+        if err != nil {
+            return err
+        }
+
+        // Create a new GKE cluster
+        gkeCluster, err := container.NewCluster(ctx, "gke-cluster", &container.ClusterArgs{
+            AddonsConfig: &container.ClusterAddonsConfigArgs{
+                DnsCacheConfig: &container.ClusterAddonsConfigDnsCacheConfigArgs{
+                    Enabled: pulumi.Bool(true),
+                },
+            },
+            BinaryAuthorization: &container.ClusterBinaryAuthorizationArgs{
+                EvaluationMode: pulumi.String("PROJECT_SINGLETON_POLICY_ENFORCE"),
+            },
+            DatapathProvider: pulumi.String("ADVANCED_DATAPATH"),
+            Description:      pulumi.String("A GKE cluster"),
+            InitialNodeCount: pulumi.Int(1),
+            IpAllocationPolicy: &container.ClusterIpAllocationPolicyArgs{
+                ClusterIpv4CidrBlock:  pulumi.String("/14"),
+                ServicesIpv4CidrBlock: pulumi.String("/20"),
+            },
+            Location: pulumi.String(gcpRegion),
+            MasterAuthorizedNetworksConfig: &container.ClusterMasterAuthorizedNetworksConfigArgs{
+                CidrBlocks: container.ClusterMasterAuthorizedNetworksConfigCidrBlockArray{
+                    &container.ClusterMasterAuthorizedNetworksConfigCidrBlockArgs{
+                        CidrBlock:   pulumi.String("0.0.0.0/0"),
+                        DisplayName: pulumi.String("All networks"),
networks"), + }, + }, + }, + Network: gkeNetwork.Name, + NetworkingMode: pulumi.String("VPC_NATIVE"), + PrivateClusterConfig: &container.ClusterPrivateClusterConfigArgs{ + EnablePrivateNodes: pulumi.Bool(true), + EnablePrivateEndpoint: pulumi.Bool(false), + MasterIpv4CidrBlock: pulumi.String("10.100.0.0/28"), + }, + RemoveDefaultNodePool: pulumi.Bool(true), + ReleaseChannel: &container.ClusterReleaseChannelArgs{ + Channel: pulumi.String("STABLE"), + }, + Subnetwork: gkeSubnet.Name, + WorkloadIdentityConfig: &container.ClusterWorkloadIdentityConfigArgs{ + WorkloadPool: pulumi.String(fmt.Sprintf("%v.svc.id.goog", gcpProject)), + }, + }) + if err != nil { + return err + } + + // Create a GCP Service Account for the node pool + gkeNodepoolSa, err := serviceaccount.NewAccount(ctx, "gke-nodepool-sa", &serviceaccount.AccountArgs{ + AccountId: pulumi.Sprintf("%v-np-1-sa", gkeCluster.Name), + DisplayName: pulumi.String("Nodepool 1 Service Account"), + }) + if err != nil { + return err + } + + // Create a new node pool + _, err = container.NewNodePool(ctx, "gke-nodepool", &container.NodePoolArgs{ + Cluster: gkeCluster.ID(), + NodeCount: pulumi.Int(nodesPerZone), + NodeConfig: &container.NodePoolNodeConfigArgs{ + OauthScopes: pulumi.StringArray{ + pulumi.String("https://www.googleapis.com/auth/cloud-platform"), + }, + ServiceAccount: gkeNodepoolSa.Email, + }, + }) + if err != nil { + return err + } + + // Build Kubeconfig for accessing the cluster + clusterKubeconfig := pulumi.Sprintf(`apiVersion: v1 +clusters: +- cluster: + certificate-authority-data: %[3]s + server: https://%[2]s + name: %[1]s +contexts: +- context: + cluster: %[1]s + user: %[1]s + name: %[1]s +current-context: %[1]s +kind: Config +preferences: {} +users: +- name: %[1]s + user: + exec: + apiVersion: client.authentication.k8s.io/v1beta1 + command: gke-gcloud-auth-plugin + installHint: Install gke-gcloud-auth-plugin for use with kubectl by following + https://cloud.google.com/blog/products/containers-kubernetes/kubectl-auth-changes-in-gke + provideClusterInfo: true + `, gkeCluster.Name, gkeCluster.Endpoint, gkeCluster.MasterAuth.ClusterCaCertificate().Elem()) + + // Export some values for use elsewhere + ctx.Export("networkName", gkeNetwork.Name) + ctx.Export("networkId", gkeNetwork.ID()) + ctx.Export("clusterName", gkeCluster.Name) + ctx.Export("clusterId", gkeCluster.ID()) + ctx.Export("kubeconfig", clusterKubeconfig) + return nil + }) +} diff --git a/kubernetes-gcp-python/.gitignore b/kubernetes-gcp-python/.gitignore new file mode 100644 index 000000000..b664ab4e4 --- /dev/null +++ b/kubernetes-gcp-python/.gitignore @@ -0,0 +1,2 @@ +*.pyc +venv/ \ No newline at end of file diff --git a/kubernetes-gcp-python/Pulumi.yaml b/kubernetes-gcp-python/Pulumi.yaml new file mode 100644 index 000000000..002f9d8fa --- /dev/null +++ b/kubernetes-gcp-python/Pulumi.yaml @@ -0,0 +1,14 @@ +name: ${PROJECT} +description: ${DESCRIPTION} +runtime: python +template: + description: A Python program to create a Kubernetes cluster on Google Cloud + config: + gcp:project: + description: The Google Cloud project to deploy into + gcp:region: + default: us-central1 + description: The Google Cloud region to deploy into + nodesPerZone: + default: 1 + description: The desired number of nodes PER ZONE in the nodepool diff --git a/kubernetes-gcp-python/__main__.py b/kubernetes-gcp-python/__main__.py new file mode 100644 index 000000000..3b2b39609 --- /dev/null +++ b/kubernetes-gcp-python/__main__.py @@ -0,0 +1,122 @@ +import pulumi +import pulumi_gcp as gcp + +# 
+# Get some provider-namespaced configuration values
+provider_cfg = pulumi.Config("gcp")
+gcp_project = provider_cfg.require("project")
+gcp_region = provider_cfg.get("region", "us-central1")
+# Get some additional configuration values
+config = pulumi.Config()
+nodes_per_zone = config.get_int("nodesPerZone", 1)
+
+# Create a new network
+gke_network = gcp.compute.Network(
+    "gke-network",
+    auto_create_subnetworks=False,
+    description="A virtual network for your GKE cluster(s)"
+)
+
+# Create a subnet in the new network
+gke_subnet = gcp.compute.Subnetwork(
+    "gke-subnet",
+    ip_cidr_range="10.128.0.0/12",
+    network=gke_network.id,
+    private_ip_google_access=True
+)
+
+# Create a cluster in the new network and subnet
+gke_cluster = gcp.container.Cluster(
+    "gke-cluster",
+    addons_config=gcp.container.ClusterAddonsConfigArgs(
+        dns_cache_config=gcp.container.ClusterAddonsConfigDnsCacheConfigArgs(
+            enabled=True
+        ),
+    ),
+    binary_authorization=gcp.container.ClusterBinaryAuthorizationArgs(
+        evaluation_mode="PROJECT_SINGLETON_POLICY_ENFORCE"
+    ),
+    datapath_provider="ADVANCED_DATAPATH",
+    description="A GKE cluster",
+    initial_node_count=1,
+    ip_allocation_policy=gcp.container.ClusterIpAllocationPolicyArgs(
+        cluster_ipv4_cidr_block="/14",
+        services_ipv4_cidr_block="/20"
+    ),
+    location=gcp_region,
+    master_authorized_networks_config=gcp.container.ClusterMasterAuthorizedNetworksConfigArgs(
+        cidr_blocks=[gcp.container.ClusterMasterAuthorizedNetworksConfigCidrBlockArgs(
+            cidr_block="0.0.0.0/0",
+            display_name="All networks"
+        )]
+    ),
+    network=gke_network.name,
+    networking_mode="VPC_NATIVE",
+    private_cluster_config=gcp.container.ClusterPrivateClusterConfigArgs(
+        enable_private_nodes=True,
+        enable_private_endpoint=False,
+        master_ipv4_cidr_block="10.100.0.0/28"
+    ),
+    remove_default_node_pool=True,
+    release_channel=gcp.container.ClusterReleaseChannelArgs(
+        channel="STABLE"
+    ),
+    subnetwork=gke_subnet.name,
+    workload_identity_config=gcp.container.ClusterWorkloadIdentityConfigArgs(
+        workload_pool=f"{gcp_project}.svc.id.goog"
+    )
+)
+
+# Create a GCP service account for the nodepool
+gke_nodepool_sa = gcp.serviceaccount.Account(
+    "gke-nodepool-sa",
+    account_id=pulumi.Output.concat(gke_cluster.name, "-np-1-sa"),
+    display_name="Nodepool 1 Service Account"
+)
+
+# Create a nodepool for the cluster
+gke_nodepool = gcp.container.NodePool(
+    "gke-nodepool",
+    cluster=gke_cluster.id,
+    node_count=nodes_per_zone,
+    node_config=gcp.container.NodePoolNodeConfigArgs(
+        oauth_scopes=["https://www.googleapis.com/auth/cloud-platform"],
+        service_account=gke_nodepool_sa.email
+    )
+)
+
+# Build a Kubeconfig to access the cluster
+cluster_kubeconfig = pulumi.Output.all(
+    gke_cluster.master_auth.cluster_ca_certificate,
+    gke_cluster.endpoint,
+    gke_cluster.name).apply(lambda l:
+    f"""apiVersion: v1
+clusters:
+- cluster:
+    certificate-authority-data: {l[0]}
+    server: https://{l[1]}
+  name: {l[2]}
+contexts:
+- context:
+    cluster: {l[2]}
+    user: {l[2]}
+  name: {l[2]}
+current-context: {l[2]}
+kind: Config
+preferences: {{}}
+users:
+- name: {l[2]}
+  user:
+    exec:
+      apiVersion: client.authentication.k8s.io/v1beta1
+      command: gke-gcloud-auth-plugin
+      installHint: Install gke-gcloud-auth-plugin for use with kubectl by following
+        https://cloud.google.com/blog/products/containers-kubernetes/kubectl-auth-changes-in-gke
+      provideClusterInfo: true
+""")
+
+# Export some values for use elsewhere
+pulumi.export("networkName", gke_network.name)
+pulumi.export("networkId", gke_network.id)
+pulumi.export("clusterName", gke_cluster.name) +pulumi.export("clusterId", gke_cluster.id) +pulumi.export("kubeconfig", cluster_kubeconfig) diff --git a/kubernetes-gcp-python/requirements.txt b/kubernetes-gcp-python/requirements.txt new file mode 100644 index 000000000..2b2806482 --- /dev/null +++ b/kubernetes-gcp-python/requirements.txt @@ -0,0 +1,2 @@ +pulumi>=3.0.0,<4.0.0 +pulumi-gcp==6.39.0 diff --git a/kubernetes-gcp-typescript/.gitignore b/kubernetes-gcp-typescript/.gitignore new file mode 100644 index 000000000..dc902b57a --- /dev/null +++ b/kubernetes-gcp-typescript/.gitignore @@ -0,0 +1,2 @@ +/bin/ +/node_modules/ \ No newline at end of file diff --git a/kubernetes-gcp-typescript/Pulumi.yaml b/kubernetes-gcp-typescript/Pulumi.yaml new file mode 100644 index 000000000..14ea8091b --- /dev/null +++ b/kubernetes-gcp-typescript/Pulumi.yaml @@ -0,0 +1,14 @@ +name: ${PROJECT} +description: ${DESCRIPTION} +runtime: nodejs +template: + description: A TypeScript program to create a Kubernetes cluster on Google Cloud + config: + gcp:project: + description: The Google Cloud project to deploy into + gcp:region: + default: us-central1 + description: The Google Cloud region to deploy into + nodesPerZone: + default: 1 + description: The desired number of nodes PER ZONE in the nodepool diff --git a/kubernetes-gcp-typescript/index.ts b/kubernetes-gcp-typescript/index.ts new file mode 100644 index 000000000..ef30cbd41 --- /dev/null +++ b/kubernetes-gcp-typescript/index.ts @@ -0,0 +1,113 @@ +import * as pulumi from "@pulumi/pulumi"; +import * as gcp from "@pulumi/gcp"; + +// Get some provider-namespaced configuration values +const providerCfg = new pulumi.Config("gcp"); +const gcpProject = providerCfg.require("project"); +const gcpRegion = providerCfg.get("region") || "us-central1"; +// Get some other configuration values or use defaults +const cfg = new pulumi.Config(); +const nodesPerZone = cfg.getNumber("nodesPerZone") || 1; + +// Create a new network +const gkeNetwork = new gcp.compute.Network("gke-network", { + autoCreateSubnetworks: false, + description: "A virtual network for your GKE cluster(s)", +}); + +// Create a new subnet in the network created above +const gkeSubnet = new gcp.compute.Subnetwork("gke-subnet", { + ipCidrRange: "10.128.0.0/12", + network: gkeNetwork.id, + privateIpGoogleAccess: true, +}); + +// Create a new GKE cluster +const gkeCluster = new gcp.container.Cluster("gke-cluster", { + addonsConfig: { + dnsCacheConfig: { + enabled: true, + }, + }, + binaryAuthorization: { + evaluationMode: "PROJECT_SINGLETON_POLICY_ENFORCE", + }, + datapathProvider: "ADVANCED_DATAPATH", + description: "A GKE cluster", + initialNodeCount: 1, + ipAllocationPolicy: { + clusterIpv4CidrBlock: "/14", + servicesIpv4CidrBlock: "/20", + }, + location: gcpRegion, + masterAuthorizedNetworksConfig: { + cidrBlocks: [{ + cidrBlock: "0.0.0.0/0", + displayName: "All networks", + }], + }, + network: gkeNetwork.name, + networkingMode: "VPC_NATIVE", + privateClusterConfig: { + enablePrivateNodes: true, + enablePrivateEndpoint: false, + masterIpv4CidrBlock: "10.100.0.0/28", + }, + removeDefaultNodePool: true, + releaseChannel: { + channel: "STABLE", + }, + subnetwork: gkeSubnet.name, + workloadIdentityConfig: { + workloadPool: `${gcpProject}.svc.id.goog`, + }, +}); + +// Create a service account for the node pool +const gkeNodepoolSa = new gcp.serviceaccount.Account("gke-nodepool-sa", { + accountId: pulumi.interpolate `${gkeCluster.name}-np-1-sa`, + displayName: "Nodepool 1 Service Account", +}); + +// Create a 
+// Create a nodepool for the GKE cluster
+const gkeNodepool = new gcp.container.NodePool("gke-nodepool", {
+    cluster: gkeCluster.id,
+    nodeCount: nodesPerZone,
+    nodeConfig: {
+        oauthScopes: ["https://www.googleapis.com/auth/cloud-platform"],
+        serviceAccount: gkeNodepoolSa.email,
+    },
+});
+
+// Build a Kubeconfig for accessing the cluster
+const clusterKubeconfig = pulumi.interpolate `apiVersion: v1
+clusters:
+- cluster:
+    certificate-authority-data: ${gkeCluster.masterAuth.clusterCaCertificate}
+    server: https://${gkeCluster.endpoint}
+  name: ${gkeCluster.name}
+contexts:
+- context:
+    cluster: ${gkeCluster.name}
+    user: ${gkeCluster.name}
+  name: ${gkeCluster.name}
+current-context: ${gkeCluster.name}
+kind: Config
+preferences: {}
+users:
+- name: ${gkeCluster.name}
+  user:
+    exec:
+      apiVersion: client.authentication.k8s.io/v1beta1
+      command: gke-gcloud-auth-plugin
+      installHint: Install gke-gcloud-auth-plugin for use with kubectl by following
+        https://cloud.google.com/blog/products/containers-kubernetes/kubectl-auth-changes-in-gke
+      provideClusterInfo: true
+`;
+
+// Export some values for use elsewhere
+export const networkName = gkeNetwork.name;
+export const networkId = gkeNetwork.id;
+export const clusterName = gkeCluster.name;
+export const clusterId = gkeCluster.id;
+export const kubeconfig = clusterKubeconfig;
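
The kubeconfig exported by index.ts can be consumed directly by the Pulumi Kubernetes provider, so workloads can be deployed onto the new cluster from the same program. The lines below are a minimal sketch rather than part of this template: they assume @pulumi/kubernetes has been added to package.json, and the resource names (gke-provider, app-ns) are illustrative. Because the kubeconfig authenticates through the gke-gcloud-auth-plugin exec plugin, that binary must be installed wherever Pulumi or kubectl runs.

import * as k8s from "@pulumi/kubernetes";

// A provider bound to the cluster created above, using the generated kubeconfig.
const gkeProvider = new k8s.Provider("gke-provider", {
    kubeconfig: clusterKubeconfig,
});

// Any Kubernetes resource can then target the new GKE cluster via this provider.
const appNamespace = new k8s.core.v1.Namespace("app-ns", {}, { provider: gkeProvider });
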
diff --git a/kubernetes-gcp-typescript/package.json b/kubernetes-gcp-typescript/package.json
new file mode 100644
index 000000000..889dd0ec5
--- /dev/null
+++ b/kubernetes-gcp-typescript/package.json
@@ -0,0 +1,11 @@
+{
+    "name": "kubernetes-gcp-typescript",
+    "devDependencies": {
+        "@types/node": "^14"
+    },
+    "dependencies": {
+        "typescript": "^4.0.0",
+        "@pulumi/pulumi": "^3.0.0",
+        "@pulumi/gcp": "6.39.0"
+    }
+}
\ No newline at end of file
diff --git a/kubernetes-gcp-typescript/tsconfig.json b/kubernetes-gcp-typescript/tsconfig.json
new file mode 100644
index 000000000..f65a38d4e
--- /dev/null
+++ b/kubernetes-gcp-typescript/tsconfig.json
@@ -0,0 +1,18 @@
+{
+    "compilerOptions": {
+        "strict": true,
+        "outDir": "bin",
+        "target": "es2016",
+        "module": "commonjs",
+        "moduleResolution": "node",
+        "sourceMap": true,
+        "experimentalDecorators": true,
+        "pretty": true,
+        "noFallthroughCasesInSwitch": true,
+        "noImplicitReturns": true,
+        "forceConsistentCasingInFileNames": true
+    },
+    "files": [
+        "index.ts"
+    ]
+}
\ No newline at end of file
diff --git a/kubernetes-gcp-yaml/Pulumi.yaml b/kubernetes-gcp-yaml/Pulumi.yaml
new file mode 100644
index 000000000..42c80f23c
--- /dev/null
+++ b/kubernetes-gcp-yaml/Pulumi.yaml
@@ -0,0 +1,129 @@
+name: ${PROJECT}
+description: ${DESCRIPTION}
+runtime: yaml
+template:
+  description: A YAML program to create a Kubernetes cluster on Google Cloud
+  config:
+    gcp:project:
+      description: The Google Cloud project to deploy into
+    gcp:region:
+      default: us-central1
+      description: The Google Cloud region to deploy into
+    nodesPerZone:
+      default: 1
+      description: The desired number of nodes PER ZONE in the nodepool
+
+configuration:
+  gcp:project:
+    type: String
+  gcp:region:
+    type: String
+    default: us-central1
+  nodesPerZone:
+    type: Number
+    default: 1
+
+resources:
+  # Create a GCP network (global VPC)
+  gke-network:
+    type: gcp:compute:Network
+    properties:
+      # Disable autoCreateSubnetworks because Private Google Access is needed
+      autoCreateSubnetworks: false
+      description: A virtual network for your GKE cluster(s)
+  # Create a subnet in the new GCP network
+  gke-subnet:
+    type: gcp:compute:Subnetwork
+    properties:
+      ipCidrRange: 10.128.0.0/12
+      network: ${gke-network.id}
+      privateIpGoogleAccess: true
+  # Create a new GKE cluster
+  gke-cluster:
+    type: gcp:container:Cluster
+    properties:
+      addonsConfig:
+        dnsCacheConfig:
+          enabled: true
+      binaryAuthorization:
+        evaluationMode: PROJECT_SINGLETON_POLICY_ENFORCE
+      datapathProvider: ADVANCED_DATAPATH
+      description: A GKE cluster
+      # Enabling Autopilot will invalidate many of the other settings included here
+      # enableAutopilot: false
+      initialNodeCount: 1
+      ipAllocationPolicy:
+        clusterIpv4CidrBlock: /14
+        servicesIpv4CidrBlock: /20
+      location: ${gcp:region}
+      masterAuthorizedNetworksConfig:
+        cidrBlocks:
+          # Change this CIDR block to something more restrictive for enhanced security
+          - cidrBlock: 0.0.0.0/0
+            displayName: All networks
+      network: ${gke-network.name}
+      networkingMode: VPC_NATIVE
+      privateClusterConfig:
+        enablePrivateNodes: true
+        # Changing this to true requires some form of connectivity to GCP (VPN or equivalent)
+        enablePrivateEndpoint: false
+        masterIpv4CidrBlock: 10.100.0.0/28
+      removeDefaultNodePool: true
+      releaseChannel:
+        channel: STABLE
+      subnetwork: ${gke-subnet.name}
+      workloadIdentityConfig:
+        workloadPool: ${gcp:project}.svc.id.goog
+  # Create a new service account for the nodepool
+  gke-nodepool-sa:
+    type: gcp:serviceAccount:Account
+    properties:
+      accountId: ${gke-cluster.name}-np-1-sa
+      displayName: Nodepool 1 Service Account
+  # Create a new nodepool for the cluster
+  gke-nodepool:
+    type: gcp:container:NodePool
+    properties:
+      cluster: ${gke-cluster.id}
+      # Specify the number of nodes PER ZONE
+      nodeCount: ${nodesPerZone}
+      nodeConfig:
+        # These scopes should be tightened down to only the required services/access
+        oauthScopes:
+          - https://www.googleapis.com/auth/cloud-platform
+        serviceAccount: ${gke-nodepool-sa.email}
+
+variables:
+  clusterKubeconfig:
+    fn::secret: |
+      apiVersion: v1
+      clusters:
+      - cluster:
+          certificate-authority-data: ${gke-cluster.masterAuth["clusterCaCertificate"]}
+          server: https://${gke-cluster.endpoint}
+        name: ${gke-cluster.name}
+      contexts:
+      - context:
+          cluster: ${gke-cluster.name}
+          user: ${gke-cluster.name}
+        name: ${gke-cluster.name}
+      current-context: ${gke-cluster.name}
+      kind: Config
+      preferences: {}
+      users:
+      - name: ${gke-cluster.name}
+        user:
+          exec:
+            apiVersion: client.authentication.k8s.io/v1beta1
+            command: gke-gcloud-auth-plugin
+            installHint: Install gke-gcloud-auth-plugin for use with kubectl by following
+              https://cloud.google.com/blog/products/containers-kubernetes/kubectl-auth-changes-in-gke
+            provideClusterInfo: true
+
+outputs:
+  # Export some values to be used elsewhere
+  networkName: ${gke-network.name}
+  networkId: ${gke-network.id}
+  clusterName: ${gke-cluster.name}
+  clusterId: ${gke-cluster.id}
+  kubeconfig: ${clusterKubeconfig}
diff --git a/kubernetes-gcp-yaml/Pulumi.yaml.append b/kubernetes-gcp-yaml/Pulumi.yaml.append
new file mode 100644
index 000000000..c5b185536
--- /dev/null
+++ b/kubernetes-gcp-yaml/Pulumi.yaml.append
@@ -0,0 +1,114 @@
+configuration:
+  gcp:project:
+    type: String
+  gcp:region:
+    type: String
+    default: us-central1
+  nodesPerZone:
+    type: Number
+    default: 1
+
+resources:
+  # Create a GCP network (global VPC)
+  gke-network:
+    type: gcp:compute:Network
+    properties:
+      # Disable autoCreateSubnetworks because Private Google Access is needed
+      autoCreateSubnetworks: false
+      description: A virtual network for your GKE cluster(s)
+  # Create a subnet in the new GCP network
+  gke-subnet:
+    type: gcp:compute:Subnetwork
+    properties:
+      ipCidrRange: 10.128.0.0/12
+      network: ${gke-network.id}
+      privateIpGoogleAccess: true
+  # Create a new GKE cluster
+  gke-cluster:
+    type: gcp:container:Cluster
+    properties:
+      addonsConfig:
+        dnsCacheConfig:
+          enabled: true
+      binaryAuthorization:
+        evaluationMode: PROJECT_SINGLETON_POLICY_ENFORCE
+      datapathProvider: ADVANCED_DATAPATH
+      description: A GKE cluster
+      # Enabling Autopilot will invalidate many of the other settings included here
+      # enableAutopilot: false
+      initialNodeCount: 1
+      ipAllocationPolicy:
+        clusterIpv4CidrBlock: /14
+        servicesIpv4CidrBlock: /20
+      location: ${gcp:region}
+      masterAuthorizedNetworksConfig:
+        cidrBlocks:
+          # Change this CIDR block to something more restrictive for enhanced security
+          - cidrBlock: 0.0.0.0/0
+            displayName: All networks
+      network: ${gke-network.name}
+      networkingMode: VPC_NATIVE
+      privateClusterConfig:
+        enablePrivateNodes: true
+        # Changing this to true requires some form of connectivity to GCP (VPN or equivalent)
+        enablePrivateEndpoint: false
+        masterIpv4CidrBlock: 10.100.0.0/28
+      removeDefaultNodePool: true
+      releaseChannel:
+        channel: STABLE
+      subnetwork: ${gke-subnet.name}
+      workloadIdentityConfig:
+        workloadPool: ${gcp:project}.svc.id.goog
+  # Create a new service account for the nodepool
+  gke-nodepool-sa:
+    type: gcp:serviceAccount:Account
+    properties:
+      accountId: ${gke-cluster.name}-np-1-sa
+      displayName: Nodepool 1 Service Account
+  # Create a new nodepool for the cluster
+  gke-nodepool:
+    type: gcp:container:NodePool
+    properties:
+      cluster: ${gke-cluster.id}
+      # Specify the number of nodes PER ZONE
+      nodeCount: ${nodesPerZone}
+      nodeConfig:
+        # These scopes should be tightened down to only the required services/access
+        oauthScopes:
+          - https://www.googleapis.com/auth/cloud-platform
+        serviceAccount: ${gke-nodepool-sa.email}
+
+variables:
+  clusterKubeconfig:
+    fn::secret: |
+      apiVersion: v1
+      clusters:
+      - cluster:
+          certificate-authority-data: ${gke-cluster.masterAuth["clusterCaCertificate"]}
+          server: https://${gke-cluster.endpoint}
+        name: ${gke-cluster.name}
+      contexts:
+      - context:
+          cluster: ${gke-cluster.name}
+          user: ${gke-cluster.name}
+        name: ${gke-cluster.name}
+      current-context: ${gke-cluster.name}
+      kind: Config
+      preferences: {}
+      users:
+      - name: ${gke-cluster.name}
+        user:
+          exec:
+            apiVersion: client.authentication.k8s.io/v1beta1
+            command: gke-gcloud-auth-plugin
+            installHint: Install gke-gcloud-auth-plugin for use with kubectl by following
+              https://cloud.google.com/blog/products/containers-kubernetes/kubectl-auth-changes-in-gke
+            provideClusterInfo: true
+
+outputs:
+  # Export some values to be used elsewhere
+  networkName: ${gke-network.name}
+  networkId: ${gke-network.id}
+  clusterName: ${gke-cluster.name}
+  clusterId: ${gke-cluster.id}
+  kubeconfig: ${clusterKubeconfig}
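
Each template exports the same set of stack outputs, so the cluster can also be consumed from outside the deploying program, either on the command line (for example, pulumi stack output kubeconfig) or from another Pulumi project through a stack reference. The following is a rough TypeScript sketch under assumptions not found in the templates themselves: the stack name myorg/gke/dev is a placeholder, and @pulumi/kubernetes is an extra dependency.

import * as pulumi from "@pulumi/pulumi";
import * as k8s from "@pulumi/kubernetes";

// Reference the stack that was deployed from one of the templates above (placeholder name).
const clusterStack = new pulumi.StackReference("myorg/gke/dev");

// Pull the exported kubeconfig and use it to configure a Kubernetes provider.
const kubeconfig = clusterStack.requireOutput("kubeconfig");
const provider = new k8s.Provider("gke", { kubeconfig });

// Workloads defined in this stack now deploy onto the referenced cluster.
const ns = new k8s.core.v1.Namespace("workloads", {}, { provider });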