Skip to content

Commit

Permalink
wip: Added placement + unit test
Browse files Browse the repository at this point in the history
  • Loading branch information
jmolmo committed Dec 14, 2021
1 parent fada563 commit 3818938
Show file tree
Hide file tree
Showing 3 changed files with 118 additions and 6 deletions.
1 change: 1 addition & 0 deletions config/rbac/kustomization.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -7,6 +7,7 @@ resources:
- service_account.yaml
- role.yaml
- role_binding.yaml
- sccs.yaml
- leader_election_role.yaml
- leader_election_role_binding.yaml
# Comment the following 4 lines if you want to disable
Expand Down
27 changes: 21 additions & 6 deletions controllers/topolvm_node.go
Original file line number Diff line number Diff line change
Expand Up @@ -29,10 +29,7 @@ func (n topolvmNode) getName() string {

func (n topolvmNode) ensureCreated(r *LVMClusterReconciler, ctx context.Context, lvmCluster *lvmv1alpha1.LVMCluster) error {
nodeDaemonSet := getNodeDaemonSet(lvmCluster)
result, err := cutil.CreateOrUpdate(ctx, r.Client, nodeDaemonSet, func() error {
// make sure LVMCluster CR garbage collects this daemonset and also block owner removal
return cutil.SetControllerReference(lvmCluster, nodeDaemonSet, r.Scheme)
})
result, err := cutil.CreateOrUpdate(ctx, r.Client, nodeDaemonSet, func() error { return nil })

switch result {
case cutil.OperationResultCreated:
Expand All @@ -46,6 +43,7 @@ func (n topolvmNode) ensureCreated(r *LVMClusterReconciler, ctx context.Context,
return err
}

fmt.Printf("JMO ----> result: %s --- name: %s ", result, nodeDaemonSet.Name)
return nil
}

Expand Down Expand Up @@ -73,6 +71,10 @@ func (n topolvmNode) updateStatus(r *LVMClusterReconciler, ctx context.Context,
return nil
}

// extractNodeSelectorAndTolerations returns the node selector and tolerations
// that should be applied to the topolvm-node daemonset, derived from the given
// LVMCluster CR. It is currently a stub: both results are nil, which callers
// treat as "no placement constraints from the CR".
// TODO: use the function defined in vgmanager.go (possibly move the function to a common utils location)
func extractNodeSelectorAndTolerations(lvmCluster lvmv1alpha1.LVMCluster) (nodeSelector *corev1.NodeSelector, tolerations []corev1.Toleration) {
	// Named returns start at their zero values (nil, nil) — exactly the
	// placeholder contract until the shared helper is wired in.
	return nodeSelector, tolerations
}
func getNodeDaemonSet(lvmCluster *lvmv1alpha1.LVMCluster) *v1.DaemonSet {
hostPathDirectory := corev1.HostPathDirectory
hostPathDirectoryOrCreateType := corev1.HostPathDirectoryOrCreate
Expand Down Expand Up @@ -112,7 +114,19 @@ func getNodeDaemonSet(lvmCluster *lvmv1alpha1.LVMCluster) *v1.DaemonSet {
iContainers := []corev1.Container{*getInitContainer()}
containers := []corev1.Container{*getLvmdContainer(), *getNodeContainer(), *getCsiRegistrarContainer(), *getLivenessProbeContainer()}

// TODO: Add the same node selector we will have in the lvmcluster CRD
// Affinity and tolerations
nodeSelector, tolerations := extractNodeSelectorAndTolerations(*lvmCluster)
topolvmNodeAffinity := &corev1.Affinity{}
if nodeSelector != nil {
topolvmNodeAffinity = &corev1.Affinity{
NodeAffinity: &corev1.NodeAffinity{RequiredDuringSchedulingIgnoredDuringExecution: nodeSelector},
}
}
topolvmNodeTolerations := []corev1.Toleration{{Operator: corev1.TolerationOpExists}}
if tolerations != nil {
topolvmNodeTolerations = tolerations
}

nodeDaemonSet := &v1.DaemonSet{
ObjectMeta: metav1.ObjectMeta{
Name: TopolvmNodeDaemonsetName,
Expand Down Expand Up @@ -140,7 +154,8 @@ func getNodeDaemonSet(lvmCluster *lvmv1alpha1.LVMCluster) *v1.DaemonSet {
Containers: containers,
Volumes: volumes,
HostPID: true,
Tolerations: []corev1.Toleration{{Operator: corev1.TolerationOpExists}},
Tolerations: topolvmNodeTolerations,
Affinity: topolvmNodeAffinity,
},
},
},
Expand Down
96 changes: 96 additions & 0 deletions controllers/topolvm_node_test.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,96 @@
/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package controllers

import (
"context"
"fmt"
"sigs.k8s.io/controller-runtime/pkg/client"
"time"

. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
lvmv1alpha1 "github.com/red-hat-storage/lvm-operator/api/v1alpha1"
v1 "k8s.io/api/apps/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
)

// Verifies that reconciling an LVMCluster CR creates the topolvm-node
// daemonset, and that deleting the CR removes the operator-created resources.
var _ = Describe("LVMCluster controller", func() {

	const (
		timeout  = time.Second * 10
		interval = time.Millisecond * 250
	)

	ctx := context.Background()

	// LVMCluster CR details
	lvmClusterName := types.NamespacedName{Name: "test-lvmcluster-node", Namespace: testLvmClusterNamespace}
	lvmClusterOut := &lvmv1alpha1.LVMCluster{}
	lvmClusterIn := &lvmv1alpha1.LVMCluster{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "test-lvmcluster-node",
			Namespace: testLvmClusterNamespace,
		},
		Spec: lvmv1alpha1.LVMClusterSpec{
			DeviceClasses: []lvmv1alpha1.DeviceClass{{Name: "test"}},
		},
	}

	// Key of the topolvm-node daemonset the controller is expected to create.
	// client.ObjectKey is an alias of types.NamespacedName.
	csiNodeName := client.ObjectKey{Name: TopolvmNodeDaemonsetName, Namespace: testLvmClusterNamespace}
	csiNodeOut := &v1.DaemonSet{}

	Context("LVMCluster reconciliation on installing CR", func() {
		It("should reconcile LVMCluster CR creation, ", func() {
			By("verifying CR status.Ready is set to true on reconciliation")
			Expect(k8sClient.Create(ctx, lvmClusterIn)).Should(Succeed())

			// placeholder to check CR status.Ready field to be true
			Eventually(func() bool {
				if err := k8sClient.Get(ctx, lvmClusterName, lvmClusterOut); err != nil {
					return false
				}
				return lvmClusterOut.Status.Ready
			}, timeout, interval).Should(BeTrue())

			// presence of Node Daemonset
			By(fmt.Sprintf("Confirming CSI Node daemonSet %s exists", TopolvmNodeDaemonsetName))
			Eventually(func() bool {
				return k8sClient.Get(ctx, csiNodeName, csiNodeOut) == nil
			}, timeout, interval).Should(BeTrue())

		})
	})

	Context("LVMCluster reconciliation on uninstalling CR", func() {
		It("should reconcile LVMCluster CR deletion ", func() {
			By("confirming absence of lvm cluster CR and deletion of operator created resources")

			// The delete itself must succeed; retrying Delete inside Eventually
			// and asserting on "any error" would also pass on an unrelated
			// failure, masking a broken delete.
			Expect(k8sClient.Delete(ctx, lvmClusterOut)).Should(Succeed())

			// Wait until the CR is actually gone (finalizers processed and
			// object removed), i.e. Get starts returning an error (NotFound).
			Eventually(func() bool {
				return k8sClient.Get(ctx, lvmClusterName, lvmClusterOut) != nil
			}, timeout, interval).Should(BeTrue())
		})
	})

})

0 comments on commit 3818938

Please sign in to comment.