core: add flatten-rbd-pvc command #229
`@@ -0,0 +1,47 @@` — new file: the `flatten-rbd-pvc` Cobra command definition (package `command`)
```go
/*
Copyright 2024 The Rook Authors. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

	http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package command

import (
	flatten_rbd_pvc "github.com/rook/kubectl-rook-ceph/pkg/flatten-rbd-pvc"

	"github.com/spf13/cobra"
)

var namespace string
var allowInUse bool

// FlattenRBDPVCCmd represents the flatten-rbd-pvc command
var FlattenRBDPVCCmd = &cobra.Command{
	Use:   "flatten-rbd-pvc",
	Short: "Flatten the RBD image corresponding to the target RBD PVC",
	Long: `Flatten the RBD image corresponding to the target RBD PVC.
The target RBD PVC must be a cloned image and must have been created by ceph-csi.
This command also removes the corresponding temporary cloned image[1]
if the target PVC was cloned from another PVC.

[1]: https://github.com/ceph/ceph-csi/blob/devel/docs/design/proposals/rbd-snap-clone.md`,
	Args: cobra.ExactArgs(1),
	Run: func(cmd *cobra.Command, args []string) {
		flatten_rbd_pvc.FlattenRBDPVC(cmd.Context(), clientSets, operatorNamespace, cephClusterNamespace, namespace, args[0], allowInUse)
	},
}

func init() {
	FlattenRBDPVCCmd.Flags().StringVarP(&namespace, "namespace", "n", "default", "namespace of the target PVC")
	FlattenRBDPVCCmd.Flags().BoolVarP(&allowInUse, "allow-in-use", "a", false, "allow flattening an in-use image")
}
```
`@@ -0,0 +1,12 @@` — new file: user documentation for the command
# Flatten RBD PVC

The `flatten-rbd-pvc` command flattens the RBD image corresponding to the target RBD PVC.
For more details about flattening, see [the Ceph official document](https://docs.ceph.com/en/latest/rbd/rbd-snapshot/#flattening-a-cloned-image).

By flattening RBD images, we can bypass problems specific to non-flattened cloned images, such as https://github.com/ceph/ceph-csi/discussions/4360.

## Examples

```bash
kubectl rook-ceph flatten-rbd-pvc rbd-pvc-clone
```
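The command also accepts the flags defined in this PR (`-n/--namespace`, defaulting to `default`, and `-a/--allow-in-use`). The PVC and namespace names below are illustrative:

```bash
# Flatten a clone PVC that lives in a non-default namespace.
kubectl rook-ceph flatten-rbd-pvc -n my-namespace rbd-pvc-clone

# Force flattening even while the image has active watchers (use with care).
kubectl rook-ceph flatten-rbd-pvc --allow-in-use rbd-pvc-clone
```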
`@@ -0,0 +1,110 @@` — new file: the implementation (package `flatten_rbd_pvc` under `pkg/flatten-rbd-pvc`)
```go
package flatten_rbd_pvc

import (
	"context"
	"encoding/json"
	"fmt"

	"github.com/rook/kubectl-rook-ceph/pkg/exec"
	"github.com/rook/kubectl-rook-ceph/pkg/k8sutil"
	"github.com/rook/kubectl-rook-ceph/pkg/logging"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// RBDInfoOutput holds the fields we consume from `rbd info --format=json`.
type RBDInfoOutput struct {
	ID   string `json:"id"`
	Name string `json:"name"`
}

type Watcher struct {
	Address string `json:"address"`
}

// RBDStatusOutput holds the fields we consume from `rbd status --format=json`.
type RBDStatusOutput struct {
	Watchers []Watcher `json:"watchers"`
}

// FlattenRBDPVC flattens the RBD image backing the given PVC. If the PVC was
// cloned from another PVC, the corresponding temporary cloned image is
// removed first.
func FlattenRBDPVC(ctx context.Context, clientSets *k8sutil.Clientsets, operatorNamespace, clusterNamespace, namespace, pvcName string, allowInUse bool) {
	pvc, err := clientSets.Kube.CoreV1().PersistentVolumeClaims(namespace).Get(ctx, pvcName, metav1.GetOptions{})
	if err != nil {
		logging.Fatal(err, "failed to get PVC %s/%s", namespace, pvcName)
	}
	if pvc.DeletionTimestamp != nil {
		logging.Fatal(fmt.Errorf("PVC %s is being deleted", pvcName))
	}
	if pvc.Status.Phase != corev1.ClaimBound {
		logging.Fatal(fmt.Errorf("PVC %s is not bound", pvcName))
	}

	// Only a PVC cloned from another PVC leaves a temporary image behind;
	// a snapshot-restored PVC does not.
	shouldDeleteTempImage := false
	if pvc.Spec.DataSource != nil {
		switch pvc.Spec.DataSource.Kind {
		case "PersistentVolumeClaim":
			shouldDeleteTempImage = true
		case "VolumeSnapshot":
		default:
			logging.Fatal(fmt.Errorf("PVC %s is not a cloned image", pvcName))
		}
	}

	pvName := pvc.Spec.VolumeName
	pv, err := clientSets.Kube.CoreV1().PersistentVolumes().Get(ctx, pvName, metav1.GetOptions{})
	if err != nil {
		logging.Fatal(fmt.Errorf("failed to get PV %s", pvName))
	}
	imageName, ok := pv.Spec.CSI.VolumeAttributes["imageName"]
	if !ok {
		logging.Fatal(fmt.Errorf("PV %s doesn't contain `imageName` in VolumeAttributes", pvName))
	}
	poolName, ok := pv.Spec.CSI.VolumeAttributes["pool"]
	if !ok {
		logging.Fatal(fmt.Errorf("PV %s doesn't contain `pool` in VolumeAttributes", pvName))
	}

	if !allowInUse {
		out, err := exec.RunCommandInOperatorPod(ctx, clientSets, "rbd", []string{"-p", poolName, "status", imageName, "--format=json"}, operatorNamespace, clusterNamespace, false)
		if err != nil {
			logging.Fatal(fmt.Errorf("failed to stat %s/%s", poolName, imageName))
		}
		var status RBDStatusOutput
		if err := json.Unmarshal([]byte(out), &status); err != nil {
			logging.Fatal(fmt.Errorf("failed to parse the status of %s/%s: %v", poolName, imageName, err))
		}
		if len(status.Watchers) > 0 {
			logging.Fatal(fmt.Errorf("flattening the in-use PVC %s is not allowed. To do so anyway, re-run with the `--allow-in-use` option", pvcName))
		}
	}

	if shouldDeleteTempImage {
		deleteTempImage(ctx, clientSets, operatorNamespace, clusterNamespace, poolName, imageName)
	}
	logging.Info("flattening the target RBD image %s/%s", poolName, imageName)
	_, err = exec.RunCommandInOperatorPod(ctx, clientSets, "ceph", []string{"rbd", "task", "add", "flatten", fmt.Sprintf("%s/%s", poolName, imageName)}, operatorNamespace, clusterNamespace, false)
	if err != nil {
		logging.Fatal(fmt.Errorf("failed to flatten %s/%s", poolName, imageName))
	}
}

// deleteTempImage moves the ceph-csi temporary cloned image ("<image>-temp")
// to the trash and schedules its removal.
func deleteTempImage(ctx context.Context, clientSets *k8sutil.Clientsets, operatorNamespace, cephClusterNamespace, poolName, imageName string) {
	tempImageName := imageName + "-temp"

	out, err := exec.RunCommandInOperatorPod(ctx, clientSets, "rbd", []string{"-p", poolName, "info", "--format=json", tempImageName}, operatorNamespace, cephClusterNamespace, false)
	if err != nil {
		logging.Error(fmt.Errorf("failed to run `rbd info` for rbd image %s/%s", poolName, tempImageName))
		return
	}
	var info RBDInfoOutput
	if err := json.Unmarshal([]byte(out), &info); err != nil {
		logging.Error(fmt.Errorf("failed to parse `rbd info` output for %s/%s: %v", poolName, tempImageName, err))
		return
	}
	id := info.ID
	logging.Info("removing the temporary RBD image %s/%s if it exists", poolName, tempImageName)
	_, err = exec.RunCommandInOperatorPod(ctx, clientSets, "rbd", []string{"-p", poolName, "trash", "mv", tempImageName}, operatorNamespace, cephClusterNamespace, false)
	if err != nil {
		logging.Fatal(fmt.Errorf("failed to move rbd image %s/%s to trash", poolName, tempImageName))
	}
	if id != "" {
		_, err = exec.RunCommandInOperatorPod(ctx, clientSets, "ceph", []string{"rbd", "task", "add", "trash", "remove", fmt.Sprintf("%s/%s", poolName, id)}, operatorNamespace, cephClusterNamespace, false)
		if err != nil {
			logging.Fatal(fmt.Errorf("failed to create a task to remove %s/%s from trash", poolName, id))
		}
	}
}
```
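The two output structs above deliberately pick out only the fields the command consumes from the Ceph CLI's JSON. A self-contained sketch of the in-use check, using an illustrative `rbd status --format=json` payload (real output carries more fields than shown here):

```go
package main

import (
	"encoding/json"
	"fmt"
)

type Watcher struct {
	Address string `json:"address"`
}

type RBDStatusOutput struct {
	Watchers []Watcher `json:"watchers"`
}

func main() {
	// Illustrative payload; the watcher address and extra fields are made up.
	sample := `{"watchers":[{"address":"192.168.1.10:0/123456","client":4321,"cookie":1}]}`

	var status RBDStatusOutput
	if err := json.Unmarshal([]byte(sample), &status); err != nil {
		panic(err)
	}

	// A non-empty watcher list means some client has the image open, which
	// is why the command refuses to flatten without --allow-in-use.
	fmt.Println("in use:", len(status.Watchers) > 0)
}
```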
`@@ -267,6 +267,28 @@ install_minikube_with_none_driver()` — the CI helper script gains two functions: one installs the external snapshotter, the other creates a cloned PVC and waits for it to be bound:

```bash
  sudo -E minikube start --kubernetes-version="$1" --driver=none --memory 6g --cpus=2 --addons ingress --cni=calico
}

install_external_snapshotter() {
  EXTERNAL_SNAPSHOTTER_VERSION=7.0.2
  curl -L "https://github.com/kubernetes-csi/external-snapshotter/archive/refs/tags/v${EXTERNAL_SNAPSHOTTER_VERSION}.zip" -o external-snapshotter.zip
  unzip -d /tmp external-snapshotter.zip
  cd "/tmp/external-snapshotter-${EXTERNAL_SNAPSHOTTER_VERSION}"

  kubectl kustomize client/config/crd | kubectl create -f -
  kubectl -n kube-system kustomize deploy/kubernetes/snapshot-controller | kubectl create -f -
}

wait_for_rbd_pvc_clone_to_be_bound() {
  kubectl create -f https://raw.githubusercontent.com/rook/rook/master/deploy/examples/csi/rbd/pvc-clone.yaml

  timeout 100 bash <<-'EOF'
    until [ $(kubectl get pvc rbd-pvc-clone -o jsonpath='{.status.phase}') == "Bound" ]; do
      echo "waiting for the pvc clone to be in bound state"
      sleep 1
    done
EOF
  timeout_command_exit_code
}

########
# MAIN #
########
```

A review thread was attached to the `pvc-clone.yaml` line above:

**Reviewer:** I think you are referring to a PVC-PVC clone here, not pvc-restore, right? Where are we creating the VolumeSnapshotClass and VolumeSnapshot? If that's planned in a different PR, can we make the changes related to it there instead of here?

**Author:** Yes, I only handle PVC-PVC clone for now. I'll add the other tests in another PR as you suggested, and I'll create an issue to track this test improvement. The other tests will have the following items.
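Note that the helper only waits for the clone PVC to reach `Bound`; it does not assert that the flatten itself completed. A possible manual check, run inside the operator or toolbox pod (the pool and image names here are illustrative):

```bash
# List MGR tasks; the flatten scheduled via `ceph rbd task add flatten`
# shows up here until it finishes.
ceph rbd task list

# A flattened image no longer reports a parent.
rbd -p replicapool info csi-vol-0123 | grep parent || echo "flattened"
```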
**Reviewer:** @satoru-takeuchi Overall the PR looks good. One small suggestion: move the implementation function under the `pkg/` folder rather than putting it in the same file as the command definition. Thanks — after that, the PR will be ready to merge.

**Author:** @subhamkrai Indeed, moved.

Merged.