Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account-related emails.

Already on GitHub? Sign in to your account

feat(kubernetes): control plane ip filter for cluster resource #401

Merged
merged 6 commits into from
Sep 6, 2023
Merged
Show file tree
Hide file tree
Changes from 4 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions CHANGELOG.md
Original file line number Diff line number Diff line change
Expand Up @@ -7,6 +7,7 @@ See updating [Changelog example here](https://keepachangelog.com/en/1.0.0/)

### Added
- gateway: add read-only `addresses` field
- kubernetes: `control_plane_ip_filter` field to `upcloud_kubernetes_cluster` resource

### Changed
- kubernetes: remove node group maximum value validation. The maximum number of nodes (in the cluster) is determined by the cluster plan and the validation is done on the API side.
Expand Down
8 changes: 5 additions & 3 deletions examples/resources/upcloud_kubernetes_cluster/resource.tf
Original file line number Diff line number Diff line change
Expand Up @@ -17,9 +17,11 @@ resource "upcloud_network" "example" {

# Create a Kubernetes cluster
resource "upcloud_kubernetes_cluster" "example" {
name = "exampleapp"
network = upcloud_network.example.id
zone = "de-fra1"
# Allow access to the cluster control plane from any external source.
villevsv-upcloud marked this conversation as resolved.
Show resolved Hide resolved
control_plane_ip_filter = ["0.0.0.0/0"]
name = "exampleapp"
network = upcloud_network.example.id
zone = "de-fra1"
}

# Kubernetes cluster with private node groups requires a network that is routed through NAT gateway.
Expand Down
92 changes: 72 additions & 20 deletions internal/service/kubernetes/kubernetes.go
Original file line number Diff line number Diff line change
Expand Up @@ -21,6 +21,7 @@ const (
clientCertificateDescription = "TLS authentication client certificate, encoded (PEM)."
clientKeyDescription = "Key to pair with `client_certificate`, encoded (PEM)."
clusterCACertificateDescription = "TLS authentication root certificate bundle, encoded (PEM)."
controlPlaneIPFilterDescription = "IP addresses or IP ranges in CIDR format which are allowed to access the cluster control plane. To allow access from any source, use `[\"0.0.0.0/0\"]`. To deny access from all sources, use `[]`. Values set here do not restrict access to node groups or exposed Kubernetes services."
hostDescription = "Hostname of the cluster API. Defined as URI."
idDescription = "Cluster ID."
kubeconfigDescription = "Kubernetes config file contents for the cluster."
Expand All @@ -40,44 +41,33 @@ func ResourceCluster() *schema.Resource {
Description: "This resource represents a Managed Kubernetes cluster.",
CreateContext: resourceClusterCreate,
ReadContext: resourceClusterRead,
UpdateContext: resourceClusterUpdate,
DeleteContext: resourceClusterDelete,
Importer: &schema.ResourceImporter{
StateContext: schema.ImportStatePassthroughContext,
},
Schema: map[string]*schema.Schema{
"control_plane_ip_filter": {
Description: controlPlaneIPFilterDescription,
Type: schema.TypeSet,
Required: true,
Elem: &schema.Schema{
Type: schema.TypeString,
},
},
"name": {
Description: nameDescription,
Type: schema.TypeString,
Required: true,
ForceNew: true,
ValidateDiagFunc: validateResourceName,
},
"zone": {
Description: zoneDescription,
Type: schema.TypeString,
Required: true,
ForceNew: true,
},
"plan": {
Description: "The pricing plan used for the cluster. Default plan is `development`. You can list available plans with `upctl kubernetes plans`.",
Type: schema.TypeString,
Optional: true,
ForceNew: true,
Default: "development",
},
"network": {
Description: networkDescription,
Type: schema.TypeString,
Required: true,
ForceNew: true,
},
"private_node_groups": {
Description: "Enable private node groups. Private node groups requires a network that is routed through NAT gateway.",
Type: schema.TypeBool,
Default: false,
Optional: true,
ForceNew: true,
},
"network_cidr": {
Description: networkCIDRDescription,
Type: schema.TypeString,
Expand All @@ -91,11 +81,31 @@ func ResourceCluster() *schema.Resource {
Type: schema.TypeString,
},
},
"plan": {
Description: "The pricing plan used for the cluster. Default plan is `development`. You can list available plans with `upctl kubernetes plans`.",
Type: schema.TypeString,
Optional: true,
ForceNew: true,
Default: "development",
},
"private_node_groups": {
Description: "Enable private node groups. Private node groups requires a network that is routed through NAT gateway.",
Type: schema.TypeBool,
Default: false,
Optional: true,
ForceNew: true,
},
"state": {
Description: stateDescription,
Type: schema.TypeString,
Computed: true,
},
"zone": {
Description: zoneDescription,
Type: schema.TypeString,
Required: true,
ForceNew: true,
},
},
}
}
Expand All @@ -111,6 +121,12 @@ func resourceClusterCreate(ctx context.Context, d *schema.ResourceData, meta int
PrivateNodeGroups: d.Get("private_node_groups").(bool),
}

req.ControlPlaneIPFilter = make([]string, 0)
filters := d.Get("control_plane_ip_filter")
for _, v := range filters.(*schema.Set).List() {
req.ControlPlaneIPFilter = append(req.ControlPlaneIPFilter, v.(string))
}

c, err := svc.CreateKubernetesCluster(ctx, req)
if err != nil {
return diag.FromErr(err)
Expand Down Expand Up @@ -147,6 +163,36 @@ func resourceClusterRead(ctx context.Context, d *schema.ResourceData, meta inter
return setClusterResourceData(d, cluster)
}

// resourceClusterUpdate applies changes to the cluster's mutable fields
// (currently only `control_plane_ip_filter`) via the UpCloud API, waits for
// the cluster to return to the running state, and refreshes the Terraform
// state from the resulting cluster object.
func resourceClusterUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
	svc := meta.(*service.Service)

	req := &request.ModifyKubernetesClusterRequest{
		ClusterUUID: d.Id(),
	}

	// Build a non-nil slice even when the set is empty: an empty list tells
	// the API to deny access from all sources, whereas a nil value could be
	// interpreted as "leave the filter unmodified".
	filters := d.Get("control_plane_ip_filter").(*schema.Set).List()
	req.Cluster.ControlPlaneIPFilter = make([]string, 0, len(filters))
	for _, v := range filters {
		req.Cluster.ControlPlaneIPFilter = append(req.Cluster.ControlPlaneIPFilter, v.(string))
	}

	c, err := svc.ModifyKubernetesCluster(ctx, req)
	if err != nil {
		return diag.FromErr(err)
	}

	// Modification is asynchronous on the API side; block until the cluster
	// reaches the running state so the state we persist below is settled.
	c, err = svc.WaitForKubernetesClusterState(ctx, &request.WaitForKubernetesClusterStateRequest{
		DesiredState: upcloud.KubernetesClusterStateRunning,
		Timeout:      time.Minute * 20,
		UUID:         c.UUID,
	})
	if err != nil {
		return diag.FromErr(err)
	}

	return setClusterResourceData(d, c)
}

func resourceClusterDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
svc := meta.(*service.Service)
if err := svc.DeleteKubernetesCluster(ctx, &request.DeleteKubernetesClusterRequest{UUID: d.Id()}); err != nil {
Expand Down Expand Up @@ -205,6 +251,12 @@ func setClusterResourceData(d *schema.ResourceData, c *upcloud.KubernetesCluster
return diag.FromErr(err)
}

filters := make([]string, 0)
filters = append(filters, c.ControlPlaneIPFilter...)
if err := d.Set("control_plane_ip_filter", filters); err != nil {
return diag.FromErr(err)
}

return diags
}

Expand Down
1 change: 1 addition & 0 deletions upcloud/resource_upcloud_kubernetes_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -24,6 +24,7 @@ func TestAccUpcloudKubernetes(t *testing.T) {
{
Config: testDataS1,
Check: resource.ComposeTestCheckFunc(
resource.TestCheckTypeSetElemAttr(cName, "control_plane_ip_filter.*", "0.0.0.0/0"),
resource.TestCheckResourceAttr(cName, "name", "tf-acc-test-uks"),
resource.TestCheckResourceAttr(cName, "zone", "fi-hel2"),
resource.TestCheckResourceAttr(g1Name, "name", "small"),
Expand Down
19 changes: 11 additions & 8 deletions upcloud/testdata/upcloud_kubernetes/kubernetes_s1.tf
Original file line number Diff line number Diff line change
Expand Up @@ -23,16 +23,17 @@ resource "upcloud_network" "main" {
}

resource "upcloud_kubernetes_cluster" "main" {
name = var.name
network = upcloud_network.main.id
zone = var.zone
control_plane_ip_filter = ["0.0.0.0/0"]
name = var.name
network = upcloud_network.main.id
zone = var.zone
}

resource "upcloud_kubernetes_node_group" "g1" {
cluster = upcloud_kubernetes_cluster.main.id
node_count = 2
anti_affinity = true
labels = {
labels = {
env = "dev"
managedBy = "tf"
}
Expand All @@ -54,13 +55,15 @@ resource "upcloud_kubernetes_node_group" "g1" {
resource "upcloud_kubernetes_node_group" "g2" {
cluster = upcloud_kubernetes_cluster.main.id
node_count = 1
labels = {
labels = {
env = "qa"
managedBy = "tf"
}
name = "medium"
plan = "2xCPU-4GB"
ssh_keys = ["ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIO3fnjc8UrsYDNU8365mL3lnOPQJg18V42Lt8U/8Sm+r testt_test"]
name = "medium"
plan = "2xCPU-4GB"
ssh_keys = [
"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIO3fnjc8UrsYDNU8365mL3lnOPQJg18V42Lt8U/8Sm+r testt_test"
]
utility_network_access = false
}

Expand Down
23 changes: 13 additions & 10 deletions upcloud/testdata/upcloud_kubernetes/kubernetes_s2.tf
Original file line number Diff line number Diff line change
Expand Up @@ -23,17 +23,18 @@ resource "upcloud_network" "main" {
}

resource "upcloud_kubernetes_cluster" "main" {
name = var.name
network = upcloud_network.main.id
zone = var.zone
control_plane_ip_filter = ["0.0.0.0/0"]
villevsv-upcloud marked this conversation as resolved.
Show resolved Hide resolved
name = var.name
network = upcloud_network.main.id
zone = var.zone
}

resource "upcloud_kubernetes_node_group" "g1" {
cluster = upcloud_kubernetes_cluster.main.id
anti_affinity = true
# scale node count down
node_count = 1
labels = {
node_count = 1
labels = {
env = "dev"
managedBy = "tf"
}
Expand All @@ -52,16 +53,18 @@ resource "upcloud_kubernetes_node_group" "g1" {
}

resource "upcloud_kubernetes_node_group" "g2" {
cluster = upcloud_kubernetes_cluster.main.id
cluster = upcloud_kubernetes_cluster.main.id
# scale node count up
node_count = 2
labels = {
labels = {
env = "qa"
managedBy = "tf"
}
name = "medium"
plan = "2xCPU-4GB"
ssh_keys = ["ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIO3fnjc8UrsYDNU8365mL3lnOPQJg18V42Lt8U/8Sm+r testt_test"]
name = "medium"
plan = "2xCPU-4GB"
ssh_keys = [
"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIO3fnjc8UrsYDNU8365mL3lnOPQJg18V42Lt8U/8Sm+r testt_test"
]
utility_network_access = false
}

Expand Down