
Add k0scontrolplane healthcheck-remediation
Signed-off-by: Adrian Pedriza <[email protected]>
AdrianPedriza committed Nov 29, 2024
1 parent aef0261 commit 9e25f53
Showing 8 changed files with 597 additions and 38 deletions.
1 change: 1 addition & 0 deletions .github/workflows/go.yml
@@ -160,6 +160,7 @@ jobs:
- check-capi-controlplane-docker-tunneling-proxy
- check-capi-controlplane-docker-worker
- check-capi-docker-machine-change-template
- check-capi-controlplane-remediation
- check-capi-remote-machine-template-update
- check-capi-docker-machine-template-update
- check-capi-docker-machine-template-update-recreate
5 changes: 5 additions & 0 deletions api/controlplane/v1beta1/k0s_types.go
@@ -39,6 +39,11 @@ const (
const (
// ControlPlaneReadyCondition documents the status of the control plane
ControlPlaneReadyCondition clusterv1.ConditionType = "ControlPlaneReady"

// RemediationInProgressAnnotation is used to keep track that a remediation is in progress,
// and more specifically it tracks that the system is in between having deleted an unhealthy machine
// and recreating its replacement.
RemediationInProgressAnnotation = "controlplane.cluster.x-k8s.io/remediation-in-progress"
)

// +kubebuilder:object:root=true
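
The new annotation works as an at-most-one-remediation gate: reconcileUnhealthyMachines sets it after deleting an unhealthy machine and skips further remediation while it is present, and createMachine clears it once the replacement machine has been applied (both shown further down in this diff). A minimal standalone sketch of that lifecycle; remediateOneAtATime is a hypothetical helper written for illustration, not part of k0smotron:

package controlplane

import (
	cpv1beta1 "github.com/k0sproject/k0smotron/api/controlplane/v1beta1"
)

// remediateOneAtATime illustrates how RemediationInProgressAnnotation
// serializes remediations: at most one deleted-but-not-yet-replaced
// machine at any time. Sketch only.
func remediateOneAtATime(kcp *cpv1beta1.K0sControlPlane, deleteUnhealthy func() error) error {
	// While the annotation is present, a previously deleted machine has not
	// been replaced yet; skip any new remediation.
	if _, inProgress := kcp.Annotations[cpv1beta1.RemediationInProgressAnnotation]; inProgress {
		return nil
	}
	if err := deleteUnhealthy(); err != nil {
		return err
	}
	// Record the in-flight remediation; createMachine deletes this key once
	// the replacement machine exists.
	if kcp.Annotations == nil {
		kcp.Annotations = map[string]string{}
	}
	kcp.Annotations[cpv1beta1.RemediationInProgressAnnotation] = "true"
	return nil
}
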
11 changes: 10 additions & 1 deletion internal/controller/controlplane/helper.go
@@ -52,9 +52,18 @@ func (c *K0sController) createMachine(ctx context.Context, name string, cluster
}
_ = ctrl.SetControllerReference(kcp, machine, c.Scheme)

return machine, c.Client.Patch(ctx, machine, client.Apply, &client.PatchOptions{
err = c.Client.Patch(ctx, machine, client.Apply, &client.PatchOptions{
FieldManager: "k0smotron",
})
if err != nil {
return machine, err
}

// Remove the annotation tracking that a remediation is in progress.
// A remediation is completed when the replacement machine has been created above.
delete(kcp.Annotations, cpv1beta1.RemediationInProgressAnnotation)

return machine, nil
}

func (c *K0sController) deleteMachine(ctx context.Context, name string, kcp *cpv1beta1.K0sControlPlane) error {
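
Worth noting in createMachine above: the Patch call with client.Apply is controller-runtime's server-side apply, so create and update collapse into one declarative call whose fields are owned by the "k0smotron" field manager. A self-contained sketch of the same pattern against a generic object; the ConfigMap, its name/namespace, and the manager name are placeholders, not anything from this commit:

package main

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// serverSideApply creates the object if it is missing and updates it
// otherwise; repeated calls with the same FieldManager converge instead of
// conflicting. Sketch under assumed names.
func serverSideApply(ctx context.Context, c client.Client) error {
	cm := &corev1.ConfigMap{
		// Server-side apply needs TypeMeta populated, unlike Create/Update.
		TypeMeta:   metav1.TypeMeta{APIVersion: "v1", Kind: "ConfigMap"},
		ObjectMeta: metav1.ObjectMeta{Name: "example", Namespace: "default"},
		Data:       map[string]string{"key": "value"},
	}
	return c.Patch(ctx, cm, client.Apply, &client.PatchOptions{
		FieldManager: "example-manager",
	})
}
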
84 changes: 49 additions & 35 deletions internal/controller/controlplane/k0s_controlplane_controller.go
@@ -26,6 +26,7 @@ import (
"strings"
"time"

"github.com/go-logr/logr"
"github.com/google/uuid"
autopilot "github.com/k0sproject/k0s/pkg/apis/autopilot/v1beta2"
"github.com/k0sproject/k0smotron/internal/controller/util"
@@ -263,6 +264,11 @@ func (c *K0sController) reconcile(ctx context.Context, cluster *clusterv1.Cluste
return fmt.Errorf("error reconciling kubeconfig secret: %w", err)
}

err = c.reconcileUnhealthyMachines(ctx, cluster, kcp)
if err != nil {
return err
}

err = c.reconcileMachines(ctx, cluster, kcp)
if err != nil {
return err
@@ -406,58 +412,66 @@ func (c *K0sController) reconcileMachines(ctx context.Context, cluster *clusterv

if len(machineNamesToDelete) > 0 {
logger.Info("Found machines to delete", "count", len(machineNamesToDelete))
kubeClient, err := c.getKubeClient(ctx, cluster)
if err != nil {
return fmt.Errorf("error getting cluster client set for deletion: %w", err)
}

// Remove the oldest machine and wait for it to be deleted to avoid etcd issues
machine := machines.Filter(func(m *clusterv1.Machine) bool {
machineToDelete := machines.Filter(func(m *clusterv1.Machine) bool {
return machineNamesToDelete[m.Name]
}).Oldest()
logger.Info("Found oldest machine to delete", "machine", machine.Name)
if machine.Status.Phase == string(clusterv1.MachinePhaseDeleting) {
logger.Info("Machine is being deleted, waiting for it to be deleted", "machine", machine.Name)
logger.Info("Found oldest machine to delete", "machine", machineToDelete.Name)
if machineToDelete.Status.Phase == string(clusterv1.MachinePhaseDeleting) {
logger.Info("Machine is being deleted, waiting for it to be deleted", "machine", machineToDelete.Name)
return fmt.Errorf("waiting for previous machine to be deleted")
}

name := machine.Name

waitCtx, cancel := context.WithTimeout(ctx, 60*time.Second)
defer cancel()
err = wait.PollUntilContextCancel(waitCtx, 10*time.Second, true, func(fctx context.Context) (bool, error) {
if err := c.markChildControlNodeToLeave(fctx, name, kubeClient); err != nil {
return false, fmt.Errorf("error marking controlnode to leave: %w", err)
}

ok, err := c.checkMachineLeft(fctx, name, kubeClient)
if err != nil {
logger.Error(err, "Error checking machine left", "machine", name)
}
return ok, err
})
err := c.runMachineDeletionSequence(ctx, logger, cluster, kcp, machineToDelete)
if err != nil {
return fmt.Errorf("error checking machine left: %w", err)
return err
}

if err := c.deleteControlNode(ctx, name, kubeClient); err != nil {
return fmt.Errorf("error deleting controlnode: %w", err)
}
logger.Info("Deleted machine", "machine", machineToDelete.Name)
}
return nil
}

if err := c.deleteBootstrapConfig(ctx, name, kcp); err != nil {
return fmt.Errorf("error deleting machine from template: %w", err)
}
func (c *K0sController) runMachineDeletionSequence(ctx context.Context, logger logr.Logger, cluster *clusterv1.Cluster, kcp *cpv1beta1.K0sControlPlane, machine *clusterv1.Machine) error {
kubeClient, err := c.getKubeClient(ctx, cluster)
if err != nil {
return fmt.Errorf("error getting cluster client set for deletion: %w", err)
}

if err := c.deleteMachineFromTemplate(ctx, name, cluster, kcp); err != nil {
return fmt.Errorf("error deleting machine from template: %w", err)
waitCtx, cancel := context.WithTimeout(ctx, 60*time.Second)
defer cancel()
err = wait.PollUntilContextCancel(waitCtx, 10*time.Second, true, func(fctx context.Context) (bool, error) {
if err := c.markChildControlNodeToLeave(fctx, machine.Name, kubeClient); err != nil {
return false, fmt.Errorf("error marking controlnode to leave: %w", err)
}

if err := c.deleteMachine(ctx, name, kcp); err != nil {
return fmt.Errorf("error deleting machine from template: %w", err)
ok, err := c.checkMachineLeft(fctx, machine.Name, kubeClient)
if err != nil {
logger.Error(err, "Error checking machine left", "machine", machine.Name)
}
return ok, err
})
if err != nil {
return fmt.Errorf("error checking machine left: %w", err)
}

logger.Info("Deleted machine", "machine", name)
if err := c.deleteControlNode(ctx, machine.Name, kubeClient); err != nil {
return fmt.Errorf("error deleting controlnode: %w", err)
}

if err := c.deleteBootstrapConfig(ctx, machine.Name, kcp); err != nil {
return fmt.Errorf("error deleting machine from template: %w", err)
}

if err := c.deleteMachineFromTemplate(ctx, machine.Name, cluster, kcp); err != nil {
return fmt.Errorf("error deleting machine from template: %w", err)
}

if err := c.deleteMachine(ctx, machine.Name, kcp); err != nil {
return fmt.Errorf("error deleting machine from template: %w", err)
}

return nil
}

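
The extracted runMachineDeletionSequence bounds its mark-to-leave loop with a 60-second context and a 10-second poll interval via wait.PollUntilContextCancel. The same pattern in isolation; waitForCondition and its check argument are placeholders for illustration:

package main

import (
	"context"
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

// waitForCondition runs check immediately (the `true` argument) and then every
// 10 seconds until it reports done, returns an error, or the 60-second budget
// expires. Sketch only; check is supplied by the caller.
func waitForCondition(ctx context.Context, check func(context.Context) (bool, error)) error {
	waitCtx, cancel := context.WithTimeout(ctx, 60*time.Second)
	defer cancel()
	if err := wait.PollUntilContextCancel(waitCtx, 10*time.Second, true, check); err != nil {
		return fmt.Errorf("condition not met: %w", err)
	}
	return nil
}
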
@@ -33,7 +33,6 @@ import (
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
bootstrapv1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1beta1"
"sigs.k8s.io/cluster-api/util"
capiutil "sigs.k8s.io/cluster-api/util"
"sigs.k8s.io/cluster-api/util/annotations"
"sigs.k8s.io/cluster-api/util/secret"
@@ -273,7 +272,7 @@ func (c *K0smotronController) reconcile(ctx context.Context, cluster *clusterv1.

func (c *K0smotronController) ensureCertificates(ctx context.Context, cluster *clusterv1.Cluster, kcp *cpv1beta1.K0smotronControlPlane) error {
certificates := secret.NewCertificatesForInitialControlPlane(&bootstrapv1.ClusterConfiguration{})
return certificates.LookupOrGenerate(ctx, c.Client, util.ObjectKey(cluster), *metav1.NewControllerRef(kcp, cpv1beta1.GroupVersion.WithKind("K0smotronControlPlane")))
return certificates.LookupOrGenerate(ctx, c.Client, capiutil.ObjectKey(cluster), *metav1.NewControllerRef(kcp, cpv1beta1.GroupVersion.WithKind("K0smotronControlPlane")))
}

// SetupWithManager sets up the controller with the Manager.
166 changes: 166 additions & 0 deletions internal/controller/controlplane/remediation.go
@@ -0,0 +1,166 @@
/*
Copyright 2024.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package controlplane

import (
"context"
"fmt"

cpv1beta1 "github.com/k0sproject/k0smotron/api/controlplane/v1beta1"
"github.com/pkg/errors"
kerrors "k8s.io/apimachinery/pkg/util/errors"
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
"sigs.k8s.io/cluster-api/util/annotations"
"sigs.k8s.io/cluster-api/util/collections"
"sigs.k8s.io/cluster-api/util/conditions"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
)

func (c *K0sController) reconcileUnhealthyMachines(ctx context.Context, cluster *clusterv1.Cluster, kcp *cpv1beta1.K0sControlPlane) (retErr error) {
log := ctrl.LoggerFrom(ctx)

machines, err := collections.GetFilteredMachinesForCluster(ctx, c, cluster, collections.ControlPlaneMachines(cluster.Name))
if err != nil {
return fmt.Errorf("failed to filter machines for control plane: %w", err)
}

healthyMachines := machines.Filter(isHealthy)

// Clean up pending remediation actions that were not completed if the underlying machine is now back to healthy.
// Machines to be sanitized have the following conditions:
//
// HealthCheckSucceeded=True (current machine's state is Healthy)
// AND
// OwnerRemediated=False (machine was marked as unhealthy previously)
err = c.sanitizeHealthyMachines(ctx, healthyMachines)
if err != nil {
return err
}
if _, ok := kcp.Annotations[cpv1beta1.RemediationInProgressAnnotation]; ok {
log.Info("Another remediation is already in progress. Skipping remediation.")
return nil
}

// retrieve machines marked as unhealthy by the MHC controller
unhealthyMachines := machines.Filter(collections.HasUnhealthyCondition)

// no unhealthy machines to remediate. Reconciliation can move on to the next stage.
if len(unhealthyMachines) == 0 {
return nil
}
machineToBeRemediated := unhealthyMachines.Oldest()

if !machineToBeRemediated.ObjectMeta.DeletionTimestamp.IsZero() {
log.Info("Machine to remediate is being deleted.")
return nil
}
log = log.WithValues("Machine", machineToBeRemediated)
// Always patch the machine to be remediated conditions in order to inform about remediation state.
defer func() {
derr := c.Status().Patch(ctx, machineToBeRemediated, client.Merge)
if derr != nil {
log.Error(derr, "Failed to patch control plane Machine", "Machine", machineToBeRemediated.Name)
if retErr == nil {
retErr = errors.Wrapf(derr, "failed to patch control plane Machine %s", machineToBeRemediated.Name)
}
return
}
}()
// Ensure that the cluster remains available during and after the remediation process. The remediation must not
// compromise the cluster's ability to serve workloads or cause disruption to the control plane's functionality.
if kcp.Status.Ready {
// The cluster MUST have more than one replica, because this is the smallest cluster size that allows any etcd failure tolerance.
if !(machines.Len() > 1) {
log.Info("A control plane machine needs remediation, but the number of current replicas is less or equal to 1. Skipping remediation", "replicas", machines.Len())
conditions.MarkFalse(machineToBeRemediated, clusterv1.MachineOwnerRemediatedCondition, clusterv1.WaitingForRemediationReason, clusterv1.ConditionSeverityWarning, "KCP can't remediate if current replicas are less or equal to 1")
return nil
}

// The cluster MUST NOT have healthy machines still being provisioned. This rule prevents KCP taking actions while the cluster is in a transitional state.
if isProvisioningHealthyMachine(healthyMachines) {
log.Info("A control plane machine needs remediation, but there are other control-plane machines being provisioned. Skipping remediation")
conditions.MarkFalse(machineToBeRemediated, clusterv1.MachineOwnerRemediatedCondition, clusterv1.WaitingForRemediationReason, clusterv1.ConditionSeverityWarning, "KCP waiting for control plane machine provisioning to complete before triggering remediation")

return nil
}

// The cluster MUST have no machines with a deletion timestamp. This rule prevents KCP taking actions while the cluster is in a transitional state.
if len(machines.Filter(collections.HasDeletionTimestamp)) > 0 {
log.Info("A control plane machine needs remediation, but there are other control-plane machines being deleted. Skipping remediation")
conditions.MarkFalse(machineToBeRemediated, clusterv1.MachineOwnerRemediatedCondition, clusterv1.WaitingForRemediationReason, clusterv1.ConditionSeverityWarning, "KCP waiting for control plane machine deletion to complete before triggering remediation")
return nil
}
}

// After checks, remediation can be carried out.

if err := c.runMachineDeletionSequence(ctx, log, cluster, kcp, machineToBeRemediated); err != nil {
conditions.MarkFalse(machineToBeRemediated, clusterv1.MachineOwnerRemediatedCondition, clusterv1.RemediationFailedReason, clusterv1.ConditionSeverityError, err.Error())
return errors.Wrapf(err, "failed to delete unhealthy machine %s", machineToBeRemediated.Name)
}
log.Info("Remediated unhealthy machine, another new machine should take its place soon.")

// Mark the control plane to track that a remediation is in progress, and do not proceed until the machine is gone.
// This annotation is removed when the control plane creates the replacement machine.
annotations.AddAnnotations(kcp, map[string]string{
cpv1beta1.RemediationInProgressAnnotation: "true",
})

return nil
}

func isHealthy(machine *clusterv1.Machine) bool {
if machine == nil {
return false
}
return conditions.IsTrue(machine, clusterv1.MachineHealthCheckSucceededCondition)
}

func hasNode(machine *clusterv1.Machine) bool {
if machine == nil {
return false
}
return machine.Status.NodeRef != nil
}

func isProvisioningHealthyMachine(healthyMachines collections.Machines) bool {
return len(healthyMachines.Filter(collections.Not(hasNode))) > 0
}

func (c *K0sController) sanitizeHealthyMachines(ctx context.Context, healthyMachines collections.Machines) error {
log := ctrl.LoggerFrom(ctx)

errList := []error{}
for _, m := range healthyMachines {
if conditions.IsFalse(m, clusterv1.MachineOwnerRemediatedCondition) && m.DeletionTimestamp.IsZero() {

conditions.Delete(m, clusterv1.MachineOwnerRemediatedCondition)

err := c.Status().Patch(ctx, m, client.Merge)
if err != nil {
log.Error(err, "Failed to patch control plane Machine to clean machine's unhealthy condition", "Machine", m.Name)
errList = append(errList, errors.Wrapf(err, "failed to patch control plane Machine %s to clean machine's unhealthy condition", m.Name))
}
}
}
if len(errList) > 0 {
return kerrors.NewAggregate(errList)
}

return nil
}
1 change: 1 addition & 0 deletions inttest/Makefile.variables
@@ -21,6 +21,7 @@ smoketests := \
check-capi-controlplane-docker-worker \
check-capi-controlplane-docker-tunneling \
check-capi-controlplane-docker-tunneling-proxy \
check-capi-controlplane-remediation \
check-monitoring \
check-capi-docker-machinedeployment \
check-capi-docker-clusterclass \