diff --git a/.gitignore b/.gitignore
index fc737de69ed13..f6d46d49c86d2 100644
--- a/.gitignore
+++ b/.gitignore
@@ -131,3 +131,6 @@ openshift*.tar.gz
 # Ensure that openapi definitions are not ignored to ensure that
 # openshift/origin can vendor them.
 !pkg/generated/openapi/zz_generated.openapi.go
+
+# Ignore binaries for k8s-tests
+/k8s-tests*
diff --git a/.openshift-tests-extension/openshift_payload_hyperkube.json b/.openshift-tests-extension/openshift_payload_hyperkube.json
new file mode 100644
index 0000000000000..ecb19b90c08c7
--- /dev/null
+++ b/.openshift-tests-extension/openshift_payload_hyperkube.json
@@ -0,0 +1,132871 @@
+[
+  {
+    "name": "[sig-cluster-lifecycle] [Feature:BootstrapTokens] should sign the new added bootstrap tokens [Disabled:Unimplemented] [Suite:k8s]",
+    "otherNames": null,
+    "labels": {
+      "Feature:BootstrapTokens": {},
+      "sig-cluster-lifecycle": {}
+    },
+    "tags": null,
+    "resources": {
+      "isolation": {
+        "mode": "",
+        "conflict": null
+      },
+      "memory": "",
+      "duration": "",
+      "timeout": ""
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-cluster-lifecycle] [Feature:BootstrapTokens] should resign the bootstrap tokens when the clusterInfo ConfigMap updated [Serial] [Disruptive] [Disabled:Unimplemented] [Suite:k8s]",
+    "otherNames": null,
+    "labels": {
+      "Disruptive": {},
+      "Feature:BootstrapTokens": {},
+      "Serial": {},
+      "sig-cluster-lifecycle": {}
+    },
+    "tags": null,
+    "resources": {
+      "isolation": {
+        "mode": "",
+        "conflict": null
+      },
+      "memory": "",
+      "duration": "",
+      "timeout": ""
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-cluster-lifecycle] [Feature:BootstrapTokens] should delete the signed bootstrap tokens from clusterInfo ConfigMap when bootstrap token is deleted [Disabled:Unimplemented] [Suite:k8s]",
+    "otherNames": null,
+    "labels": {
+      "Feature:BootstrapTokens": {},
+      "sig-cluster-lifecycle": {}
+    },
+    "tags": null,
+    "resources": {
+      "isolation": {
+        "mode": "",
+        "conflict": null
+      },
+      "memory": "",
+      "duration": "",
+      "timeout": ""
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-cluster-lifecycle] [Feature:BootstrapTokens] should delete the token secret when the secret expired [Disabled:Unimplemented] [Suite:k8s]",
+    "otherNames": null,
+    "labels": {
+      "Feature:BootstrapTokens": {},
+      "sig-cluster-lifecycle": {}
+    },
+    "tags": null,
+    "resources": {
+      "isolation": {
+        "mode": "",
+        "conflict": null
+      },
+      "memory": "",
+      "duration": "",
+      "timeout": ""
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-cluster-lifecycle] [Feature:BootstrapTokens] should not delete the token secret when the secret is not expired [Disabled:Unimplemented] [Suite:k8s]",
+    "otherNames": null,
+    "labels": {
+      "Feature:BootstrapTokens": {},
+      "sig-cluster-lifecycle": {}
+    },
+    "tags": null,
+    "resources": {
+      "isolation": {
+        "mode": "",
+        "conflict": null
+      },
+      "memory": "",
+      "duration": "",
+      "timeout": ""
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-architecture] Conformance Tests should have at least two untainted nodes [Conformance] [Skipped:SingleReplicaTopology] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "otherNames": null,
+    "labels": {
+      "Conformance": {},
+      "sig-architecture": {}
+    },
+    "tags": null,
+    "resources": {
+      "isolation": {
+        "mode": "",
+        "conflict": null
+      },
+      "memory": "",
+      "duration": "",
+      "timeout": ""
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-auth] Certificates API [Privileged:ClusterAdmin] should support building a client with a CSR [Suite:openshift/conformance/parallel] [Suite:k8s]",
+    "otherNames": null,
+    "labels": {
+      "sig-auth": {}
+    },
+    "tags": null,
+    "resources": {
+      "isolation": {
+        "mode": "",
+        "conflict": null
+      },
+      "memory": "",
+      "duration": "",
+      "timeout": ""
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-auth] Certificates API [Privileged:ClusterAdmin] should support CSR API operations [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "otherNames": null,
+    "labels": {
+      "Conformance": {},
+      "sig-auth": {}
+    },
+    "tags": null,
+    "resources": {
+      "isolation": {
+        "mode": "",
+        "conflict": null
+      },
+      "memory": "",
+      "duration": "",
+      "timeout": ""
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-auth] [Feature:NodeAuthenticator] The kubelet's main port 10250 should reject requests with no credentials [Skipped:ibmroks] [Suite:openshift/conformance/parallel] [Suite:k8s]",
+    "otherNames": null,
+    "labels": {
+      "Feature:NodeAuthenticator": {},
+      "sig-auth": {}
+    },
+    "tags": null,
+    "resources": {
+      "isolation": {
+        "mode": "",
+        "conflict": null
+      },
+      "memory": "",
+      "duration": "",
+      "timeout": ""
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-auth] [Feature:NodeAuthenticator] The kubelet can delegate ServiceAccount tokens to the API server [Skipped:ibmroks] [Suite:openshift/conformance/parallel] [Suite:k8s]",
+    "otherNames": null,
+    "labels": {
+      "Feature:NodeAuthenticator": {},
+      "sig-auth": {}
+    },
+    "tags": null,
+    "resources": {
+      "isolation": {
+        "mode": "",
+        "conflict": null
+      },
+      "memory": "",
+      "duration": "",
+      "timeout": ""
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-auth] [Feature:NodeAuthorizer] Getting a non-existent secret should exit with the Forbidden error, not a NotFound error [Skipped:ibmroks] [Suite:openshift/conformance/parallel] [Suite:k8s]",
+    "otherNames": null,
+    "labels": {
+      "Feature:NodeAuthorizer": {},
+      "sig-auth": {}
+    },
+    "tags": null,
+    "resources": {
+      "isolation": {
+        "mode": "",
+        "conflict": null
+      },
+      "memory": "",
+      "duration": "",
+      "timeout": ""
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-auth] [Feature:NodeAuthorizer] Getting an existing secret should exit with the Forbidden error [Skipped:ibmroks] [Suite:openshift/conformance/parallel] [Suite:k8s]",
+    "otherNames": null,
+    "labels": {
+      "Feature:NodeAuthorizer": {},
+      "sig-auth": {}
+    },
+    "tags": null,
+    "resources": {
+      "isolation": {
+        "mode": "",
+        "conflict": null
+      },
+      "memory": "",
+      "duration": "",
+      "timeout": ""
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-auth] [Feature:NodeAuthorizer] Getting a non-existent configmap should exit with the Forbidden error, not a NotFound error [Skipped:ibmroks] [Suite:openshift/conformance/parallel] [Suite:k8s]",
+    "otherNames": null,
+    "labels": {
+      "Feature:NodeAuthorizer": {},
+      "sig-auth": {}
+    },
+    "tags": null,
+    "resources": {
+      "isolation": {
+        "mode": "",
+        "conflict": null
+      },
+      "memory": "",
+      "duration": "",
+      "timeout": ""
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-auth] [Feature:NodeAuthorizer] Getting an existing configmap should exit with the Forbidden error [Skipped:ibmroks] [Suite:openshift/conformance/parallel] [Suite:k8s]",
+    "otherNames": null,
+    "labels": {
+      "Feature:NodeAuthorizer": {},
+      "sig-auth": {}
+    },
+    "tags": null,
+    "resources": {
+      "isolation": {
+        "mode": "",
+        "conflict": null
+      },
+      "memory": "",
+      "duration": "",
+      "timeout": ""
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-auth] [Feature:NodeAuthorizer] Getting a secret for a workload the node has access to should succeed [Skipped:ibmroks] [Suite:openshift/conformance/parallel] [Suite:k8s]",
+    "otherNames": null,
+    "labels": {
+      "Feature:NodeAuthorizer": {},
+      "sig-auth": {}
+    },
+    "tags": null,
+    "resources": {
+      "isolation": {
+        "mode": "",
+        "conflict": null
+      },
+      "memory": "",
+      "duration": "",
+      "timeout": ""
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-auth] [Feature:NodeAuthorizer] A node shouldn't be able to create another node [Suite:openshift/conformance/parallel] [Suite:k8s]",
+    "otherNames": null,
+    "labels": {
+      "Feature:NodeAuthorizer": {},
+      "sig-auth": {}
+    },
+    "tags": null,
+    "resources": {
+      "isolation": {
+        "mode": "",
+        "conflict": null
+      },
+      "memory": "",
+      "duration": "",
+      "timeout": ""
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-auth] [Feature:NodeAuthorizer] A node shouldn't be able to delete another node [Suite:openshift/conformance/parallel] [Suite:k8s]",
+    "otherNames": null,
+    "labels": {
+      "Feature:NodeAuthorizer": {},
+      "sig-auth": {}
+    },
+    "tags": null,
+    "resources": {
+      "isolation": {
+        "mode": "",
+        "conflict": null
+      },
+      "memory": "",
+      "duration": "",
+      "timeout": ""
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-auth] [Feature:ClusterTrustBundle] [Feature:ClusterTrustBundleProjection] should be able to mount a single ClusterTrustBundle by name [Disabled:Alpha] [Suite:k8s]",
+    "otherNames": null,
+    "labels": {
+      "Feature:ClusterTrustBundle": {},
+      "Feature:ClusterTrustBundleProjection": {},
+      "sig-auth": {}
+    },
+    "tags": null,
+    "resources": {
+      "isolation": {
+        "mode": "",
+        "conflict": null
+      },
+      "memory": "",
+      "duration": "",
+      "timeout": ""
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-auth] SelfSubjectReview testing SSR in different API groups authentication/v1beta1 [Suite:openshift/conformance/parallel] [Suite:k8s]",
+    "otherNames": null,
+    "labels": {
+      "sig-auth": {}
+    },
+    "tags": null,
+    "resources": {
+      "isolation": {
+        "mode": "",
+        "conflict": null
+      },
+      "memory": "",
+      "duration": "",
+      "timeout": ""
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-auth] SelfSubjectReview testing SSR in different API groups authentication/v1 [Suite:openshift/conformance/parallel] [Suite:k8s]",
+    "otherNames": null,
+    "labels": {
+      "sig-auth": {}
+    },
+    "tags": null,
+    "resources": {
+      "isolation": {
+        "mode": "",
+        "conflict": null
+      },
+      "memory": "",
+      "duration": "",
+      "timeout": ""
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-auth] SelfSubjectReview should support SelfSubjectReview API operations [Suite:openshift/conformance/parallel] [Suite:k8s]",
+    "otherNames": null,
+    "labels": {
+      "sig-auth": {}
+    },
+    "tags": null,
+    "resources": {
+      "isolation": {
+        "mode": "",
+        "conflict": null
+      },
+      "memory": "",
+      "duration": "",
+      "timeout": ""
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-auth] ServiceAccounts no secret-based service account token should be auto-generated [Suite:openshift/conformance/parallel] [Suite:k8s]",
+    "otherNames": null,
+    "labels": {
+      "sig-auth": {}
+    },
+    "tags": null,
+    "resources": {
+      "isolation": {
+        "mode": "",
+        "conflict": null
+      },
+      "memory": "",
+      "duration": "",
+      "timeout": ""
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-auth] ServiceAccounts should mount an API token into pods [Conformance] [Disabled:Broken] [Suite:k8s]",
+    "otherNames": null,
+    "labels": {
+      "Conformance": {},
+      "sig-auth": {}
+    },
+    "tags": null,
+    "resources": {
+      "isolation": {
+        "mode": "",
+        "conflict": null
+      },
+      "memory": "",
+      "duration": "",
+      "timeout": ""
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-auth] ServiceAccounts should allow opting out of API token automount [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "otherNames": null,
+    "labels": {
+      "Conformance": {},
+      "sig-auth": {}
+    },
+    "tags": null,
+    "resources": {
+      "isolation": {
+        "mode": "",
+        "conflict": null
+      },
+      "memory": "",
+      "duration": "",
+      "timeout": ""
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-auth] ServiceAccounts should mount projected service account token [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "otherNames": null,
+    "labels": {
+      "Conformance": {},
+      "sig-auth": {}
+    },
+    "tags": null,
+    "resources": {
+      "isolation": {
+        "mode": "",
+        "conflict": null
+      },
+      "memory": "",
+      "duration": "",
+      "timeout": ""
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-auth] ServiceAccounts should set ownership and permission when RunAsUser or FsGroup is present [LinuxOnly] [NodeFeature:FSGroup] [Suite:openshift/conformance/parallel] [Suite:k8s]",
+    "otherNames": null,
+    "labels": {
+      "NodeFeature:FSGroup": {},
+      "sig-auth": {}
+    },
+    "tags": null,
+    "resources": {
+      "isolation": {
+        "mode": "",
+        "conflict": null
+      },
+      "memory": "",
+      "duration": "",
+      "timeout": ""
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-auth] ServiceAccounts should support InClusterConfig with token rotation [Slow] [Suite:k8s]",
+    "otherNames": null,
+    "labels": {
+      "Slow": {},
+      "sig-auth": {}
+    },
+    "tags": null,
+    "resources": {
+      "isolation": {
+        "mode": "",
+        "conflict": null
+      },
+      "memory": "",
+      "duration": "",
+      "timeout": ""
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-auth] ServiceAccounts ServiceAccountIssuerDiscovery should support OIDC discovery of service account issuer [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "otherNames": null,
+    "labels": {
+      "Conformance": {},
+      "sig-auth": {}
+    },
+    "tags": null,
+    "resources": {
+      "isolation": {
+        "mode": "",
+        "conflict": null
+      },
+      "memory": "",
+      "duration": "",
+      "timeout": ""
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-auth] ServiceAccounts should run through the lifecycle of a ServiceAccount [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "otherNames": null,
+    "labels": {
+      "Conformance": {},
+      "sig-auth": {}
+    },
+    "tags": null,
+    "resources": {
+      "isolation": {
+        "mode": "",
+        "conflict": null
+      },
+      "memory": "",
+      "duration": "",
+      "timeout": ""
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-auth] ServiceAccounts should guarantee kube-root-ca.crt exist in any namespace [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "otherNames": null,
+    "labels": {
+      "Conformance": {},
+      "sig-auth": {}
+    },
+    "tags": null,
+    "resources": {
+      "isolation": {
+        "mode": "",
+        "conflict": null
+      },
+      "memory": "",
+      "duration": "",
+      "timeout": ""
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-auth] ServiceAccounts should update a ServiceAccount [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "otherNames": null,
+    "labels": {
+      "Conformance": {},
+      "sig-auth": {}
+    },
+    "tags": null,
+    "resources": {
+      "isolation": {
+        "mode": "",
+        "conflict": null
+      },
+      "memory": "",
+      "duration": "",
+      "timeout": ""
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-auth] SubjectReview should support SubjectReview API operations [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "otherNames": null,
+    "labels": {
+      "Conformance": {},
+      "sig-auth": {}
+    },
+    "tags": null,
+    "resources": {
+      "isolation": {
+        "mode": "",
+        "conflict": null
+      },
+      "memory": "",
+      "duration": "",
+      "timeout": ""
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-network] Networking Granular Checks: Pods should function for intra-pod communication: http [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "otherNames": null,
+    "labels": {
+      "Conformance": {},
+      "NodeConformance": {},
+      "sig-network": {}
+    },
+    "tags": null,
+    "resources": {
+      "isolation": {
+        "mode": "",
+        "conflict": null
+      },
+      "memory": "",
+      "duration": "",
+      "timeout": ""
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-network] Networking Granular Checks: Pods should function for intra-pod communication: udp [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "otherNames": null,
+    "labels": {
+      "Conformance": {},
+      "NodeConformance": {},
+      "sig-network": {}
+    },
+    "tags": null,
+    "resources": {
+      "isolation": {
+        "mode": "",
+        "conflict": null
+      },
+      "memory": "",
+      "duration": "",
+      "timeout": ""
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-network] Networking Granular Checks: Pods should function for node-pod communication: http [LinuxOnly] [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "otherNames": null,
+    "labels": {
+      "Conformance": {},
+      "NodeConformance": {},
+      "sig-network": {}
+    },
+    "tags": null,
+    "resources": {
+      "isolation": {
+        "mode": "",
+        "conflict": null
+      },
+      "memory": "",
+      "duration": "",
+      "timeout": ""
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-network] Networking Granular Checks: Pods should function for node-pod communication: udp [LinuxOnly] [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "otherNames": null,
+    "labels": {
+      "Conformance": {},
+      "NodeConformance": {},
+      "sig-network": {}
+    },
+    "tags": null,
+    "resources": {
+      "isolation": {
+        "mode": "",
+        "conflict": null
+      },
+      "memory": "",
+      "duration": "",
+      "timeout": ""
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-network] Networking Granular Checks: Pods should function for intra-pod communication: sctp [LinuxOnly] [Feature:SCTPConnectivity] [Suite:openshift/conformance/parallel] [Suite:k8s]",
+    "otherNames": null,
+    "labels": {
+      "Feature:SCTPConnectivity": {},
+      "sig-network": {}
+    },
+    "tags": null,
+    "resources": {
+      "isolation": {
+        "mode": "",
+        "conflict": null
+      },
+      "memory": "",
+      "duration": "",
+      "timeout": ""
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-network] Networking Granular Checks: Pods should function for node-pod communication: sctp [LinuxOnly] [Feature:SCTPConnectivity] [Suite:openshift/conformance/parallel] [Suite:k8s]",
+    "otherNames": null,
+    "labels": {
+      "Feature:SCTPConnectivity": {},
+      "sig-network": {}
+    },
+    "tags": null,
+    "resources": {
+      "isolation": {
+        "mode": "",
+        "conflict": null
+      },
+      "memory": "",
+      "duration": "",
+      "timeout": ""
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] Downward API [Serial] [Disruptive] [Feature:EphemeralStorage] Downward API tests for local ephemeral storage should provide container's limits.ephemeral-storage and requests.ephemeral-storage as env vars [Suite:k8s]",
+    "otherNames": null,
+    "labels": {
+      "Disruptive": {},
+      "Feature:EphemeralStorage": {},
+      "Serial": {},
+      "sig-storage": {}
+    },
+    "tags": null,
+    "resources": {
+      "isolation": {
+        "mode": "",
+        "conflict": null
+      },
+      "memory": "",
+      "duration": "",
+      "timeout": ""
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] Downward API [Serial] [Disruptive] [Feature:EphemeralStorage] Downward API tests for local ephemeral storage should provide default limits.ephemeral-storage from node allocatable [Suite:k8s]",
+    "otherNames": null,
+    "labels": {
+      "Disruptive": {},
+      "Feature:EphemeralStorage": {},
+      "Serial": {},
+      "sig-storage": {}
+    },
+    "tags": null,
+    "resources": {
+      "isolation": {
+        "mode": "",
+        "conflict": null
+      },
+      "memory": "",
+      "duration": "",
+      "timeout": ""
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] EmptyDir volumes when FSGroup is specified [LinuxOnly] [NodeFeature:FSGroup] new files should be created with FSGroup ownership when container is root [Suite:openshift/conformance/parallel] [Suite:k8s]",
+    "otherNames": null,
+    "labels": {
+      "NodeFeature:FSGroup": {},
+      "sig-storage": {}
+    },
+    "tags": null,
+    "resources": {
+      "isolation": {
+        "mode": "",
+        "conflict": null
+      },
+      "memory": "",
+      "duration": "",
+      "timeout": ""
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] EmptyDir volumes when FSGroup is specified [LinuxOnly] [NodeFeature:FSGroup] new files should be created with FSGroup ownership when container is non-root [Suite:openshift/conformance/parallel] [Suite:k8s]",
+    "otherNames": null,
+    "labels": {
+      "NodeFeature:FSGroup": {},
+      "sig-storage": {}
+    },
+    "tags": null,
+    "resources": {
+      "isolation": {
+        "mode": "",
+        "conflict": null
+      },
+      "memory": "",
+      "duration": "",
+      "timeout": ""
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] EmptyDir volumes when FSGroup is specified [LinuxOnly] [NodeFeature:FSGroup] nonexistent volume subPath should have the correct mode and owner using FSGroup [Suite:openshift/conformance/parallel] [Suite:k8s]",
+    "otherNames": null,
+    "labels": {
+      "NodeFeature:FSGroup": {},
+      "sig-storage": {}
+    },
+    "tags": null,
+    "resources": {
+      "isolation": {
+        "mode": "",
+        "conflict": null
+      },
+      "memory": "",
+      "duration": "",
+      "timeout": ""
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] EmptyDir volumes when FSGroup is specified [LinuxOnly] [NodeFeature:FSGroup] files with FSGroup ownership should support (root,0644,tmpfs) [Suite:openshift/conformance/parallel] [Suite:k8s]",
+    "otherNames": null,
+    "labels": {
+      "NodeFeature:FSGroup": {},
+      "sig-storage": {}
+    },
+    "tags": null,
+    "resources": {
+      "isolation": {
+        "mode": "",
+        "conflict": null
+      },
+      "memory": "",
+      "duration": "",
+      "timeout": ""
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] EmptyDir volumes when FSGroup is specified [LinuxOnly] [NodeFeature:FSGroup] volume on default medium should have the correct mode using FSGroup [Suite:openshift/conformance/parallel] [Suite:k8s]",
+    "otherNames": null,
+    "labels": {
+      "NodeFeature:FSGroup": {},
+      "sig-storage": {}
+    },
+    "tags": null,
+    "resources": {
+      "isolation": {
+        "mode": "",
+        "conflict": null
+      },
+      "memory": "",
+      "duration": "",
+      "timeout": ""
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] EmptyDir volumes when FSGroup is specified [LinuxOnly] [NodeFeature:FSGroup] volume on tmpfs should have the correct mode using FSGroup [Suite:openshift/conformance/parallel] [Suite:k8s]",
+    "otherNames": null,
+    "labels": {
+      "NodeFeature:FSGroup": {},
+      "sig-storage": {}
+    },
+    "tags": null,
+    "resources": {
+      "isolation": {
+        "mode": "",
+        "conflict": null
+      },
+      "memory": "",
+      "duration": "",
+      "timeout": ""
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] EmptyDir volumes volume on tmpfs should have the correct mode [LinuxOnly] [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "otherNames": null,
+    "labels": {
+      "Conformance": {},
+      "NodeConformance": {},
+      "sig-storage": {}
+    },
+    "tags": null,
+    "resources": {
+      "isolation": {
+        "mode": "",
+        "conflict": null
+      },
+      "memory": "",
+      "duration": "",
+      "timeout": ""
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] EmptyDir volumes should support (root,0644,tmpfs) [LinuxOnly] [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "otherNames": null,
+    "labels": {
+      "Conformance": {},
+      "NodeConformance": {},
+      "sig-storage": {}
+    },
+    "tags": null,
+    "resources": {
+      "isolation": {
+        "mode": "",
+        "conflict": null
+      },
+      "memory": "",
+      "duration": "",
+      "timeout": ""
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] EmptyDir volumes should support (root,0666,tmpfs) [LinuxOnly] [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "otherNames": null,
+    "labels": {
+      "Conformance": {},
+      "NodeConformance": {},
+      "sig-storage": {}
+    },
+    "tags": null,
+    "resources": {
+      "isolation": {
+        "mode": "",
+        "conflict": null
+      },
+      "memory": "",
+      "duration": "",
+      "timeout": ""
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] EmptyDir volumes should support (root,0777,tmpfs) [LinuxOnly] [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "otherNames": null,
+    "labels": {
+      "Conformance": {},
+      "NodeConformance": {},
+      "sig-storage": {}
+    },
+    "tags": null,
+    "resources": {
+      "isolation": {
+        "mode": "",
+        "conflict": null
+      },
+      "memory": "",
+      "duration": "",
+      "timeout": ""
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] EmptyDir volumes should support (non-root,0644,tmpfs) [LinuxOnly] [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "otherNames": null,
+    "labels": {
+      "Conformance": {},
+      "NodeConformance": {},
+      "sig-storage": {}
+    },
+    "tags": null,
+    "resources": {
+      "isolation": {
+        "mode": "",
+        "conflict": null
+      },
+      "memory": "",
+      "duration": "",
+      "timeout": ""
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] EmptyDir volumes should support (non-root,0666,tmpfs) [LinuxOnly] [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "otherNames": null,
+    "labels": {
+      "Conformance": {},
+      "NodeConformance": {},
+      "sig-storage": {}
+    },
+    "tags": null,
+    "resources": {
+      "isolation": {
+        "mode": "",
+        "conflict": null
+      },
+      "memory": "",
+      "duration": "",
+      "timeout": ""
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] EmptyDir volumes should support (non-root,0777,tmpfs) [LinuxOnly] [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "otherNames": null,
+    "labels": {
+      "Conformance": {},
+      "NodeConformance": {},
+      "sig-storage": {}
+    },
+    "tags": null,
+    "resources": {
+      "isolation": {
+        "mode": "",
+        "conflict": null
+      },
+      "memory": "",
+      "duration": "",
+      "timeout": ""
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] EmptyDir volumes volume on default medium should have the correct mode [LinuxOnly] [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "otherNames": null,
+    "labels": {
+      "Conformance": {},
+      "NodeConformance": {},
+      "sig-storage": {}
+    },
+    "tags": null,
+    "resources": {
+      "isolation": {
+        "mode": "",
+        "conflict": null
+      },
+      "memory": "",
+      "duration": "",
+      "timeout": ""
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] EmptyDir volumes should support (root,0644,default) [LinuxOnly] [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "otherNames": null,
+    "labels": {
+      "Conformance": {},
+      "NodeConformance": {},
+      "sig-storage": {}
+    },
+    "tags": null,
+    "resources": {
+      "isolation": {
+        "mode": "",
+        "conflict": null
+      },
+      "memory": "",
+      "duration": "",
+      "timeout": ""
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] EmptyDir volumes should support (root,0666,default) [LinuxOnly] [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "otherNames": null,
+    "labels": {
+      "Conformance": {},
+      "NodeConformance": {},
+      "sig-storage": {}
+    },
+    "tags": null,
+    "resources": {
+      "isolation": {
+        "mode": "",
+        "conflict": null
+      },
+      "memory": "",
+      "duration": "",
+      "timeout": ""
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] EmptyDir volumes should support (root,0777,default) [LinuxOnly] [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "otherNames": null,
+    "labels": {
+      "Conformance": {},
+      "NodeConformance": {},
+      "sig-storage": {}
+    },
+    "tags": null,
+    "resources": {
+      "isolation": {
+        "mode": "",
+        "conflict": null
+      },
+      "memory": "",
+      "duration": "",
+      "timeout": ""
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] EmptyDir volumes should support (non-root,0644,default) [LinuxOnly] [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "otherNames": null,
+    "labels": {
+      "Conformance": {},
+      "NodeConformance": {},
+      "sig-storage": {}
+    },
+    "tags": null,
+    "resources": {
+      "isolation": {
+        "mode": "",
+        "conflict": null
+      },
+      "memory": "",
+      "duration": "",
+      "timeout": ""
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] EmptyDir volumes should support (non-root,0666,default) [LinuxOnly] [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "otherNames": null,
+    "labels": {
+      "Conformance": {},
+      "NodeConformance": {},
+      "sig-storage": {}
+    },
+    "tags": null,
+    "resources": {
+      "isolation": {
+        "mode": "",
+        "conflict": null
+      },
+      "memory": "",
+      "duration": "",
+      "timeout": ""
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] EmptyDir volumes should support (non-root,0777,default) [LinuxOnly] [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "otherNames": null,
+    "labels": {
+      "Conformance": {},
+      "NodeConformance": {},
+      "sig-storage": {}
+    },
+    "tags": null,
+    "resources": {
+      "isolation": {
+        "mode": "",
+        "conflict": null
+      },
+      "memory": "",
+      "duration": "",
+      "timeout": ""
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] EmptyDir volumes pod should support shared volumes between containers [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "otherNames": null,
+    "labels": {
+      "Conformance": {},
+      "sig-storage": {}
+    },
+    "tags": null,
+    "resources": {
+      "isolation": {
+        "mode": "",
+        "conflict": null
+      },
+      "memory": "",
+      "duration": "",
+      "timeout": ""
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] EmptyDir volumes pod should support memory backed volumes of specified size [Suite:openshift/conformance/parallel] [Suite:k8s]",
+    "otherNames": null,
+    "labels": {
+      "sig-storage": {}
+    },
+    "tags": null,
+    "resources": {
+      "isolation": {
+        "mode": "",
+        "conflict": null
+      },
+      "memory": "",
+      "duration": "",
+      "timeout": ""
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] HostPath should give a volume the correct mode [LinuxOnly] [NodeConformance] [Skipped:ibmroks] [Suite:openshift/conformance/parallel] [Suite:k8s]",
+    "otherNames": null,
+    "labels": {
+      "NodeConformance": {},
+      "sig-storage": {}
+    },
+    "tags": null,
+    "resources": {
+      "isolation": {
+        "mode": "",
+        "conflict": null
+      },
+      "memory": "",
+      "duration": "",
+      "timeout": ""
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] HostPath should support r/w [NodeConformance] [Suite:openshift/conformance/parallel] [Suite:k8s]",
+    "otherNames": null,
+    "labels": {
+      "NodeConformance": {},
+      "sig-storage": {}
+    },
+    "tags": null,
+    "resources": {
+      "isolation": {
+        "mode": "",
+        "conflict": null
+      },
+      "memory": "",
+      "duration": "",
+      "timeout": ""
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] HostPath should support subPath [NodeConformance] [Suite:openshift/conformance/parallel] [Suite:k8s]",
+    "otherNames": null,
+    "labels": {
+      "NodeConformance": {},
+      "sig-storage": {}
+    },
+    "tags": null,
+    "resources": {
+      "isolation": {
+        "mode": "",
+        "conflict": null
+      },
+      "memory": "",
+      "duration": "",
+      "timeout": ""
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] Projected combined should project all components that make up the projection API [Projection] [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "otherNames": null,
+    "labels": {
+      "Conformance": {},
+      "NodeConformance": {},
+      "sig-storage": {}
+    },
+    "tags": null,
+    "resources": {
+      "isolation": {
+        "mode": "",
+        "conflict": null
+      },
+      "memory": "",
+      "duration": "",
+      "timeout": ""
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] ConfigMap should be consumable from pods in volume [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "otherNames": null,
+    "labels": {
+      "Conformance": {},
+      "NodeConformance": {},
+      "sig-storage": {}
+    },
+    "tags": null,
+    "resources": {
+      "isolation": {
+        "mode": "",
+        "conflict": null
+      },
+      "memory": "",
+      "duration": "",
+      "timeout": ""
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] ConfigMap should be consumable from pods in volume with defaultMode set [LinuxOnly] [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "otherNames": null,
+    "labels": {
+      "Conformance": {},
+      "NodeConformance": {},
+      "sig-storage": {}
+    },
+    "tags": null,
+    "resources": {
+      "isolation": {
+        "mode": "",
+        "conflict": null
+      },
+      "memory": "",
+      "duration": "",
+      "timeout": ""
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] ConfigMap should be consumable from pods in volume as non-root with defaultMode and fsGroup set [LinuxOnly] [NodeFeature:FSGroup] [Suite:openshift/conformance/parallel] [Suite:k8s]",
+    "otherNames": null,
+    "labels": {
+      "NodeFeature:FSGroup": {},
+      "sig-storage": {}
+    },
+    "tags": null,
+    "resources": {
+      "isolation": {
+        "mode": "",
+        "conflict": null
+      },
+      "memory": "",
+      "duration": "",
+      "timeout": ""
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] ConfigMap should be consumable from pods in volume as non-root [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "otherNames": null,
+    "labels": {
+      "Conformance": {},
+      "NodeConformance": {},
+      "sig-storage": {}
+    },
+    "tags": null,
+    "resources": {
+      "isolation": {
+        "mode": "",
+        "conflict": null
+      },
+      "memory": "",
+      "duration": "",
+      "timeout": ""
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] ConfigMap should be consumable from pods in volume as non-root with FSGroup [LinuxOnly] [NodeFeature:FSGroup] [Suite:openshift/conformance/parallel] [Suite:k8s]",
+    "otherNames": null,
+    "labels": {
+      "NodeFeature:FSGroup": {},
+      "sig-storage": {}
+    },
+    "tags": null,
+    "resources": {
+      "isolation": {
+        "mode": "",
+        "conflict": null
+      },
+      "memory": "",
+      "duration": "",
+      "timeout": ""
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] ConfigMap should be consumable from pods in volume with mappings [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "otherNames": null,
+    "labels": {
+      "Conformance": {},
+      "NodeConformance": {},
+      "sig-storage": {}
+    },
+    "tags": null,
+    "resources": {
+      "isolation": {
+        "mode": "",
+        "conflict": null
+      },
+      "memory": "",
+      "duration": "",
+      "timeout": ""
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] ConfigMap should be consumable from pods in volume with mappings and Item mode set [LinuxOnly] [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "otherNames": null,
+    "labels": {
+      "Conformance": {},
+      "NodeConformance": {},
+      "sig-storage": {}
+    },
+    "tags": null,
+    "resources": {
+      "isolation": {
+        "mode": "",
+        "conflict": null
+      },
+      "memory": "",
+      "duration": "",
+      "timeout": ""
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] ConfigMap should be consumable from pods in volume with mappings as non-root [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "otherNames": null,
+    "labels": {
+      "Conformance": {},
+      "NodeConformance": {},
+      "sig-storage": {}
+    },
+    "tags": null,
+    "resources": {
+      "isolation": {
+        "mode": "",
+        "conflict": null
+      },
+      "memory": "",
+      "duration": "",
+      "timeout": ""
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] ConfigMap should be consumable from pods in volume with mappings as non-root with FSGroup [LinuxOnly] [NodeFeature:FSGroup] [Suite:openshift/conformance/parallel] [Suite:k8s]",
+    "otherNames": null,
+    "labels": {
+      "NodeFeature:FSGroup": {},
+      "sig-storage": {}
+    },
+    "tags": null,
+    "resources": {
+      "isolation": {
+        "mode": "",
+        "conflict": null
+      },
+      "memory": "",
+      "duration": "",
+      "timeout": ""
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] ConfigMap updates should be reflected in volume [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "otherNames": null,
+    "labels": {
+      "Conformance": {},
+      "NodeConformance": {},
+      "sig-storage": {}
+    },
+    "tags": null,
+    "resources": {
+      "isolation": {
+        "mode": "",
+        "conflict": null
+      },
+      "memory": "",
+      "duration": "",
+      "timeout": ""
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] ConfigMap binary data should be reflected in volume [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "otherNames": null,
+    "labels": {
+      "Conformance": {},
+      "NodeConformance": {},
+      "sig-storage": {}
+    },
+    "tags": null,
+    "resources": {
+      "isolation": {
+        "mode": "",
+        "conflict": null
+      },
+      "memory": "",
+      "duration": "",
+      "timeout": ""
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] ConfigMap optional updates should be reflected in volume [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "otherNames": null,
+    "labels": {
+      "Conformance": {},
+      "NodeConformance": {},
+      "sig-storage": {}
+    },
+    "tags": null,
+    "resources": {
+      "isolation": {
+        "mode": "",
+        "conflict": null
+      },
+      "memory": "",
+      "duration": "",
+      "timeout": ""
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] ConfigMap should be consumable in multiple volumes in the same pod [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "otherNames": null,
+    "labels": {
+      "Conformance": {},
+      "NodeConformance": {},
+      "sig-storage": {}
+    },
+    "tags": null,
+    "resources": {
+      "isolation": {
+        "mode": "",
+        "conflict": null
+      },
+      "memory": "",
+      "duration": "",
+      "timeout": ""
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] ConfigMap should be immutable if `immutable` field is set [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "otherNames": null,
+    "labels": {
+      "Conformance": {},
+      "sig-storage": {}
+    },
+    "tags": null,
+    "resources": {
+      "isolation": {
+        "mode": "",
+        "conflict": null
+      },
+      "memory": "",
+      "duration": "",
+      "timeout": ""
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] ConfigMap Should fail non-optional pod creation due to configMap object does not exist [Slow] [Suite:k8s]",
+    "otherNames": null,
+    "labels": {
+      "Slow": {},
+      "sig-storage": {}
+    },
+    "tags": null,
+    "resources": {
+      "isolation": {
+        "mode": "",
+        "conflict": null
+      },
+      "memory": "",
+      "duration": "",
+      "timeout": ""
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] ConfigMap Should fail non-optional pod creation due to the key in the configMap object does not exist [Slow] [Suite:k8s]",
+    "otherNames": null,
+    "labels": {
+      "Slow": {},
+      "sig-storage": {}
+    },
+    "tags": null,
+    "resources": {
+      "isolation": {
+        "mode": "",
+        "conflict": null
+      },
+      "memory": "",
+      "duration": "",
+      "timeout": ""
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] Downward API volume should provide podname only [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "otherNames": null,
+    "labels": {
+      "Conformance": {},
+      "NodeConformance": {},
+      "sig-storage": {}
+    },
+    "tags": null,
+    "resources": {
+      "isolation": {
+        "mode": "",
+        "conflict": null
+      },
+      "memory": "",
+      "duration": "",
+      "timeout": ""
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] Downward API volume should set DefaultMode on files [LinuxOnly] [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "otherNames": null,
+    "labels": {
+      "Conformance": {},
+      "NodeConformance": {},
+      "sig-storage": {}
+    },
+    "tags": null,
+    "resources": {
+      "isolation": {
+        "mode": "",
+        "conflict": null
+      },
+      "memory": "",
+      "duration": "",
+      "timeout": ""
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] Downward API volume should set mode on item file [LinuxOnly] [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "otherNames": null,
+    "labels": {
+      "Conformance": {},
+      "NodeConformance": {},
+      "sig-storage": {}
+    },
+    "tags": null,
+    "resources": {
+      "isolation": {
+        "mode": "",
+        "conflict": null
+      },
+      "memory": "",
+      "duration": "",
+      "timeout": ""
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] Downward API volume should provide podname as non-root with fsgroup [LinuxOnly] [NodeFeature:FSGroup] [Suite:openshift/conformance/parallel] [Suite:k8s]",
+    "otherNames": null,
+    "labels": {
+      "NodeFeature:FSGroup": {},
+      "sig-storage": {}
+    },
+    "tags": null,
+    "resources": {
+      "isolation": {
+        "mode": "",
+        "conflict": null
+      },
+      "memory": "",
+      "duration": "",
+      "timeout": ""
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] Downward API volume should provide podname as non-root with fsgroup and defaultMode [LinuxOnly] [NodeFeature:FSGroup] [Suite:openshift/conformance/parallel] [Suite:k8s]",
+    "otherNames": null,
+    "labels": {
+      "NodeFeature:FSGroup": {},
+      "sig-storage": {}
+    },
+    "tags": null,
+    "resources": {
+      "isolation": {
+        "mode": "",
+        "conflict": null
+      },
+      "memory": "",
+      "duration": "",
+      "timeout": ""
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] Downward API volume should update labels on modification [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "otherNames": null,
+    "labels": {
+      "Conformance": {},
+      "NodeConformance": {},
+      "sig-storage": {}
+    },
+    "tags": null,
+    "resources": {
+      "isolation": {
+        "mode": "",
+        "conflict": null
+      },
+      "memory": "",
+      "duration": "",
+      "timeout": ""
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] Downward API volume should update annotations on modification [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "otherNames": null,
+    "labels": {
+      "Conformance": {},
+      "NodeConformance": {},
+      "sig-storage": {}
+    },
+    "tags": null,
+    "resources": {
+      "isolation": {
+        "mode": "",
+        "conflict": null
+      },
+      "memory": "",
+      "duration": "",
+      "timeout": ""
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] Downward API volume should provide container's cpu limit [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "otherNames": null,
+    "labels": {
+      "Conformance": {},
+      "NodeConformance": {},
+      "sig-storage": {}
+    },
+    "tags": null,
+    "resources": {
+      "isolation": {
+        "mode": "",
+        "conflict": null
+      },
+      "memory": "",
+      "duration": "",
+      "timeout": ""
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] Downward API volume should provide container's memory limit [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "otherNames": null,
+    "labels": {
+      "Conformance": {},
+      "NodeConformance": {},
+      "sig-storage": {}
+    },
+    "tags": null,
+    "resources": {
+      "isolation": {
+        "mode": "",
+        "conflict": null
+      },
+      "memory": "",
+      "duration": "",
+      "timeout": ""
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] Downward API volume should provide container's cpu request [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "otherNames": null,
+    "labels": {
+      "Conformance": {},
+      "NodeConformance": {},
+      "sig-storage": {}
+    },
+    "tags": null,
+    "resources": {
+      "isolation": {
+        "mode": "",
+        "conflict": null
+      },
+      "memory": "",
+      "duration": "",
+      "timeout": ""
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] Downward API volume should provide container's memory request [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "otherNames": null,
+    "labels": {
+      "Conformance": {},
+      "NodeConformance": {},
+      "sig-storage": {}
+    },
+    "tags": null,
+    "resources": {
+      "isolation": {
+        "mode": "",
+        "conflict": null
+      },
+      "memory": "",
+      "duration": "",
+      "timeout": ""
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] Downward API volume should provide node allocatable (cpu) as default cpu limit if the limit is not set [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "otherNames": null,
+    "labels": {
+      "Conformance": {},
+      "NodeConformance": {},
+      "sig-storage": {}
+    },
+    "tags": null,
+    "resources": {
+      "isolation": {
+        "mode": "",
+        "conflict": null
+      },
+      "memory": "",
+      "duration": "",
+      "timeout": ""
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] Downward API volume should provide node allocatable (memory) as default memory limit if the limit is not set [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "otherNames": null,
+    "labels": {
+      "Conformance": {},
+      "NodeConformance": {},
+      "sig-storage": {}
+    },
+    "tags": null,
+    "resources": {
+      "isolation": {
+        "mode": "",
+        "conflict": null
+      },
+      "memory": "",
+      "duration": "",
+      "timeout": ""
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] Projected configMap should be consumable from pods in volume [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "otherNames": null,
+    "labels": {
+      "Conformance": {},
+      "NodeConformance": {},
+      "sig-storage": {}
+    },
+    "tags": null,
+    "resources": {
+      "isolation": {
+        "mode": "",
+        "conflict": null
+      },
+      "memory": "",
+      "duration": "",
+      "timeout": ""
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] Projected configMap should be consumable from pods in volume with defaultMode set [LinuxOnly] [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "otherNames": null,
+    "labels": {
+      "Conformance": {},
+      "NodeConformance": {},
+      "sig-storage": {}
+    },
+    "tags": null,
+    "resources": {
+      "isolation": {
+        "mode": "",
+        "conflict": null
+      },
+      "memory": "",
+      "duration": "",
+      "timeout": ""
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] Projected configMap should be consumable from pods in volume as non-root with defaultMode and fsGroup set [LinuxOnly] [NodeFeature:FSGroup] [Suite:openshift/conformance/parallel] [Suite:k8s]",
+    "otherNames": null,
+    "labels": {
+      "NodeFeature:FSGroup": {},
+      "sig-storage": {}
+    },
+    "tags": null,
+    "resources": {
+      "isolation": {
+        "mode": "",
+        "conflict": null
+      },
+      "memory": "",
+      "duration": "",
+      "timeout": ""
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] Projected configMap should be consumable from pods in volume as non-root [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "otherNames": null,
+    "labels": {
+      "Conformance": {},
+      "NodeConformance": {},
+      "sig-storage": {}
+    },
+    "tags": null,
+    "resources": {
+      "isolation": {
+        "mode": "",
+        "conflict": null
+      },
+      "memory": "",
+      "duration": "",
+      "timeout": ""
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] Projected configMap should be consumable from pods in volume as non-root with FSGroup [LinuxOnly] [NodeFeature:FSGroup] [Suite:openshift/conformance/parallel] [Suite:k8s]",
+    "otherNames": null,
+    "labels": {
+      "NodeFeature:FSGroup": {},
+      "sig-storage": {}
+    },
+    "tags": null,
+    "resources": {
+      "isolation": {
+        "mode": "",
+        "conflict": null
+      },
+      "memory": "",
+      "duration": "",
+      "timeout": ""
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] Projected configMap should be consumable from pods in volume with mappings [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "otherNames": null,
+    "labels": {
+      "Conformance": {},
+      "NodeConformance": {},
+      "sig-storage": {}
+    },
+    "tags": null,
+    "resources": {
+      "isolation": {
+        "mode": "",
+        "conflict": null
+      },
+      "memory": "",
+      "duration": "",
+      "timeout": ""
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] Projected configMap should be consumable from pods in volume with mappings and Item mode set [LinuxOnly] [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "otherNames": null,
+    "labels": {
+      "Conformance": {},
+      "NodeConformance": {},
+      "sig-storage": {}
+    },
+    "tags": null,
+    "resources": {
+      "isolation": {
+        "mode": "",
+        "conflict": null
+      },
+      "memory": "",
+      "duration": "",
+      "timeout": ""
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] Projected configMap should be consumable from pods in volume with mappings as non-root [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "otherNames": null,
+    "labels": {
+      "Conformance": {},
+      "NodeConformance": {},
+      "sig-storage": {}
+    },
+    "tags": null,
+    "resources": {
+      "isolation": {
+        "mode": "",
+        "conflict": null
+      },
+      "memory": "",
+      "duration": "",
+      "timeout": ""
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] Projected configMap should be consumable from pods in volume with mappings as non-root with FSGroup [LinuxOnly] [NodeFeature:FSGroup] [Suite:openshift/conformance/parallel] [Suite:k8s]",
+    "otherNames": null,
+    "labels": {
+      "NodeFeature:FSGroup": {},
+      "sig-storage": {}
+    },
+    "tags": null,
+    "resources": {
+      "isolation": {
+        "mode": "",
+        "conflict": null
+      },
+      "memory": "",
+      "duration": "",
+      "timeout": ""
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] Projected configMap updates should be reflected in volume [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "otherNames": null,
+    "labels": {
+      "Conformance": {},
+      "NodeConformance": {},
+      "sig-storage": {}
+    },
+    "tags": null,
+    "resources": {
+      "isolation": {
+        "mode": "",
+        "conflict": null
+      },
+      "memory": "",
+      "duration": "",
+      "timeout": ""
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] Projected configMap optional updates should be reflected in volume [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "otherNames": null,
+    "labels": {
+      "Conformance": {},
+      "NodeConformance": {},
+      "sig-storage": {}
+    },
+    "tags": null,
+    "resources": {
+      "isolation": {
+        "mode": "",
+        "conflict": null
+      },
+      "memory": "",
+      "duration": "",
+      "timeout": ""
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] Projected configMap should be consumable in multiple volumes in the same pod [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "otherNames": null,
+    "labels": {
+      "Conformance": {},
+      "NodeConformance": {},
+      "sig-storage": {}
+    },
+    "tags": null,
+    "resources": {
+      "isolation": {
+        "mode": "",
+        "conflict": null
+      },
+      "memory": "",
+      "duration": "",
+      "timeout": ""
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] Projected configMap Should fail non-optional pod creation due to configMap object does not exist [Slow] [Suite:k8s]",
+    "otherNames": null,
+    "labels": {
+      "Slow": {},
+      "sig-storage": {}
+    },
+    "tags": null,
+    "resources": {
+      "isolation": {
+        "mode": "",
+        "conflict": null
+      },
+      "memory": "",
+      "duration": "",
+      "timeout": ""
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] Projected configMap Should fail non-optional pod creation due to the key in the configMap object does not exist [Slow] [Suite:k8s]",
+    "otherNames": null,
+    "labels": {
+      "Slow": {},
+      "sig-storage": {}
+    },
+    "tags": null,
+    "resources": {
+      "isolation": {
+        "mode": "",
+        "conflict": null
+      },
+      "memory": "",
+      "duration": "",
+      "timeout": ""
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] Projected downwardAPI should provide podname only [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "otherNames": null,
+    "labels": {
+      "Conformance": {},
+      "NodeConformance": {},
+      "sig-storage": {}
+    },
+    "tags": null,
+    "resources": {
+      "isolation": {
+        "mode": "",
+        "conflict": null
+      },
+      "memory": "",
+      "duration": "",
+      "timeout": ""
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] Projected downwardAPI should set DefaultMode on files [LinuxOnly] [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "otherNames": null,
+    "labels": {
+      "Conformance": {},
+      "NodeConformance": {},
+      "sig-storage": {}
+    },
+    "tags": null,
+    "resources": {
+      "isolation": {
+        "mode": "",
+        "conflict": null
+      },
+      "memory": "",
+      "duration": "",
+      "timeout": ""
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] Projected downwardAPI should set mode on item file [LinuxOnly] [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "otherNames": null,
+    "labels": {
+      "Conformance": {},
+      "NodeConformance": {},
+      "sig-storage": {}
+    },
+    "tags": null,
+    "resources": {
+      "isolation": {
+        "mode": "",
+        "conflict": null
+      },
+      "memory": "",
+      "duration": "",
+      "timeout": ""
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] Projected downwardAPI should provide podname as non-root with fsgroup [LinuxOnly] [NodeFeature:FSGroup] [Suite:openshift/conformance/parallel] [Suite:k8s]",
+    "otherNames": null,
+    "labels": {
+      "NodeFeature:FSGroup": {},
+      "sig-storage": {}
+    },
+    "tags": null,
+    "resources": {
+      "isolation": {
+        "mode": "",
+        "conflict": null
+      },
+      "memory": "",
+      "duration": "",
+      "timeout": ""
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] Projected downwardAPI should provide podname as non-root with fsgroup and defaultMode [LinuxOnly] [NodeFeature:FSGroup] [Suite:openshift/conformance/parallel] [Suite:k8s]",
+    "otherNames": null,
+    "labels": {
+      "NodeFeature:FSGroup": {},
+      "sig-storage": {}
+    },
+    "tags": null,
+    "resources": {
+      "isolation": {
+        "mode": "",
+        "conflict": null
+      },
+      "memory": "",
+      "duration": "",
+      "timeout": ""
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] Projected downwardAPI should update labels on modification [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "otherNames": null,
+    "labels": {
+      "Conformance": {},
+      "NodeConformance": {},
+      "sig-storage": {}
+    },
+    "tags": null,
+    "resources": {
+      "isolation": {
+        "mode": "",
+        "conflict": null
+      },
+      "memory": "",
+      "duration": "",
+      "timeout": ""
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] Projected downwardAPI should update annotations on modification [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "otherNames": null,
+    "labels": {
+      "Conformance": {},
+      "NodeConformance": {},
+      "sig-storage": {}
+    },
+    "tags": null,
+    "resources": {
+      "isolation": {
+        "mode": "",
+        "conflict": null
+      },
+      "memory": "",
+      "duration": "",
+      "timeout": ""
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] Projected downwardAPI should provide container's cpu limit [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "otherNames": null,
+    "labels": {
+      "Conformance": {},
+      "NodeConformance": {},
+      "sig-storage": {}
+    },
+    "tags": null,
+    "resources": {
+      "isolation": {
+        "mode": "",
+        "conflict": null
+      },
+      "memory": "",
+      "duration": "",
+      "timeout": ""
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] Projected downwardAPI should provide container's memory limit [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "otherNames": null,
+    "labels": {
+      "Conformance": {},
+      "NodeConformance": {},
+      "sig-storage": {}
+    },
+    "tags": null,
+    "resources": {
+      "isolation": {
+        "mode": "",
+        "conflict": null
+      },
+      "memory": "",
+      "duration": "",
+      "timeout": ""
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] Projected downwardAPI should provide container's cpu request [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "otherNames": null,
+    "labels": {
+      "Conformance": {},
+      "NodeConformance": {},
+      "sig-storage": {}
+    },
+    "tags": null,
+    "resources": {
+      "isolation": {
+        "mode": "",
+        "conflict": null
+      },
+      "memory": "",
+      "duration": "",
+      "timeout": ""
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] Projected downwardAPI should provide container's memory request [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "otherNames": null,
+    "labels": {
+      "Conformance": {},
+      "NodeConformance": {},
+      "sig-storage": {}
+    },
+    "tags": null,
+    "resources": {
+      "isolation": {
+        "mode": "",
+        "conflict": null
+      },
+      "memory": "",
+      "duration": "",
+      "timeout": ""
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] Projected downwardAPI should provide node allocatable (cpu) as default cpu limit if the limit is not set [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "otherNames": null,
+    "labels": {
+      "Conformance": {},
+      "NodeConformance": {},
+      "sig-storage": {}
+    },
+    "tags": null,
+    "resources": {
+      "isolation": {
+        "mode": "",
+        "conflict": null
+      },
+      "memory": "",
+      "duration": "",
+      "timeout": ""
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] Projected downwardAPI should provide node allocatable (memory) as default memory limit if the limit is not set [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "otherNames": null,
+    "labels": {
+      "Conformance": {},
+      "NodeConformance": {},
+      "sig-storage": {}
+    },
+    "tags": null,
+    "resources": {
+      "isolation": {
+        "mode": "",
+        "conflict": null
+      },
+      "memory": "",
+      "duration": "",
+      "timeout": ""
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] Projected secret should be consumable from pods in volume [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "otherNames": null,
+    "labels": {
+      "Conformance": {},
+      "NodeConformance": {},
+      "sig-storage": {}
+    },
+    "tags": null,
+    "resources": {
+      "isolation": {
+        "mode": "",
+        "conflict": null
+      },
+      "memory": "",
+      "duration": "",
+      "timeout": ""
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] Projected secret should be consumable from pods in volume with defaultMode set [LinuxOnly] [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "otherNames": null,
+    "labels": {
+      "Conformance": {},
+      "NodeConformance": {},
+      "sig-storage": {}
+    },
+    "tags": null,
+    "resources": {
+      "isolation": {
+        "mode": "",
+        "conflict": null
+      },
+      "memory": "",
+      "duration": "",
+      "timeout": ""
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] Projected secret should be consumable from pods in volume as non-root with defaultMode and fsGroup set [LinuxOnly] [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "otherNames": null,
+    "labels": {
+      "Conformance": {},
+      "NodeConformance": {},
+      "sig-storage": {}
+    },
+    "tags": null,
+    "resources": {
+      "isolation": {
+        "mode": "",
+        "conflict": null
+      },
+      "memory": "",
+      "duration": "",
+      "timeout": ""
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] Projected secret should be consumable from pods in volume with mappings [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "otherNames": null,
+    "labels": {
+      "Conformance": {},
+      "NodeConformance": {},
+      "sig-storage": {}
+    },
+    "tags": null,
+    "resources": {
+      "isolation": {
+        "mode": "",
+        "conflict": null
+      },
+      "memory": "",
+      "duration": "",
+      "timeout": ""
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] Projected secret should be consumable from pods in volume with mappings and Item Mode set [LinuxOnly] [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "otherNames": null,
+    "labels": {
+      "Conformance": {},
+      "NodeConformance": {},
+      "sig-storage": {}
+    },
+    "tags": null,
+    "resources": {
+      "isolation": {
+        "mode": "",
+        "conflict": null
+      },
+      "memory": "",
+      "duration": "",
+      "timeout": ""
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] Projected secret should be able to mount in a volume regardless of a different secret existing with same name in different namespace [NodeConformance] [Suite:openshift/conformance/parallel] [Suite:k8s]",
+    "otherNames": null,
+    "labels": {
+      "NodeConformance": {},
+      "sig-storage": {}
+    },
+    "tags": null,
+    "resources": {
+      "isolation": {
+        "mode": "",
+        "conflict": null
+      },
+      "memory": "",
+      "duration": "",
+      "timeout": ""
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] Projected secret should be consumable in multiple volumes in a pod [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "otherNames": null,
+    "labels": {
+      "Conformance": {},
+      "NodeConformance": {},
+      "sig-storage": {}
+    },
+    "tags": null,
+    "resources": {
+      "isolation": {
+        "mode": "",
+        "conflict": null
+      },
+      "memory": "",
+      "duration": "",
+      "timeout": ""
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] Projected secret optional updates should be reflected in volume [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "otherNames": null,
+    "labels": {
+      "Conformance": {},
+      "NodeConformance": {},
+      "sig-storage": {}
+    },
+    "tags": null,
+    "resources": {
+      "isolation": {
+        "mode": "",
+        "conflict": null
+      },
+      "memory": "",
+      "duration": "",
+      "timeout": ""
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] Projected secret Should fail non-optional pod creation due to secret object does not exist [Slow] [Suite:k8s]",
+    "otherNames": null,
+    "labels": {
+      "Slow": {},
+      "sig-storage": {}
+    },
+    "tags": null,
+    "resources": {
+      "isolation": {
+        "mode": "",
+        "conflict": null
+      },
+      "memory": "",
+      "duration": "",
+      "timeout": ""
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] Projected secret Should fail non-optional pod creation due to the key in the secret object does not exist [Slow] [Suite:k8s]",
+    "otherNames": null,
+    "labels": {
+      "Slow": {},
+      "sig-storage": {}
+    },
+    "tags": null,
+    "resources": {
+      "isolation": {
+        "mode": "",
+        "conflict": null
+      },
+      "memory": "",
+      "duration": "",
+      "timeout": ""
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
+    "name": "[sig-storage] Secrets should be consumable from pods in volume [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
+    "otherNames": null,
+    "labels": {
+      "Conformance": {},
+      "NodeConformance": {},
+      "sig-storage": {}
+    },
+    "tags": null,
+    "resources": {
+      "isolation": {
+        "mode": "",
+        "conflict": null
+      },
+      "memory": "",
+      "duration": "",
+      "timeout": ""
+    },
+    "source": "openshift:payload:hyperkube",
+    "lifecycle": "blocking"
+  },
+  {
"name": "[sig-storage] Secrets should be consumable from pods in volume with defaultMode set [LinuxOnly] [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "NodeConformance": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] Secrets should be consumable from pods in volume as non-root with defaultMode and fsGroup set [LinuxOnly] [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "NodeConformance": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] Secrets should be consumable from pods in volume with mappings [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "NodeConformance": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] Secrets should be consumable from pods in volume with mappings and Item Mode set [LinuxOnly] [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "NodeConformance": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] Secrets should be able to mount in a volume regardless of a different secret existing with same name in different namespace [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "NodeConformance": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] Secrets should be consumable in multiple volumes in a pod [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "NodeConformance": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] Secrets optional updates should be reflected in volume [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "NodeConformance": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + 
"conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] Secrets should be immutable if `immutable` field is set [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] Secrets Should fail non-optional pod creation due to secret object does not exist [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] Secrets Should fail non-optional pod creation due to the key in the secret object does not exist [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] Volumes NFSv4 should be mountable for NFSv4 [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] Volumes NFSv3 should be mountable for NFSv3 [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-instrumentation] Logging soak [Performance] [Slow] [Disruptive] should survive logging 1KB every 1s seconds, for a duration of 2m0s [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-instrumentation": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-instrumentation] MetricsGrabber should grab all metrics from API server. [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-instrumentation": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-instrumentation] MetricsGrabber should grab all metrics from a Kubelet. 
[Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-instrumentation": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-instrumentation] MetricsGrabber should grab all metrics from a Scheduler. [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-instrumentation": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-instrumentation] MetricsGrabber should grab all metrics from a ControllerManager. [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-instrumentation": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-instrumentation] MetricsGrabber should grab all metrics slis from API server. [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-instrumentation": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-instrumentation] Events should manage the lifecycle of an event [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "sig-instrumentation": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-instrumentation] Events should delete a collection of events [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "sig-instrumentation": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-instrumentation] Events API should ensure that an event can be fetched, patched, deleted, and listed [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "sig-instrumentation": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-instrumentation] Events API should delete a collection of events [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "sig-instrumentation": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-instrumentation] Metrics should grab all metrics 
from kubelet /metrics/resource endpoint [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-instrumentation": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Netpol API should support creating NetworkPolicy API operations [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-network": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Netpol API should support creating NetworkPolicy API with endport field [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-network": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Netpol [LinuxOnly] NetworkPolicy between server and client using UDP should support a 'default-deny-ingress' policy [Feature:NetworkPolicy] [Disabled:Broken] [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:NetworkPolicy": {}, + "sig-network": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Netpol [LinuxOnly] NetworkPolicy between server and client using UDP should enforce policy based on Ports [Feature:NetworkPolicy] [Disabled:Broken] [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:NetworkPolicy": {}, + "sig-network": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Netpol [LinuxOnly] NetworkPolicy between server and client using UDP should enforce policy to allow traffic only from a pod in a different namespace based on PodSelector and NamespaceSelector [Feature:NetworkPolicy] [Disabled:Broken] [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:NetworkPolicy": {}, + "sig-network": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Netpol [Feature:SCTPConnectivity] [LinuxOnly] NetworkPolicy between server and client using SCTP should support a 'default-deny-ingress' policy [Feature:NetworkPolicy] [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:NetworkPolicy": {}, + "Feature:SCTPConnectivity": {}, + "sig-network": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": 
"openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Netpol [Feature:SCTPConnectivity] [LinuxOnly] NetworkPolicy between server and client using SCTP should enforce policy based on Ports [Feature:NetworkPolicy] [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:NetworkPolicy": {}, + "Feature:SCTPConnectivity": {}, + "sig-network": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Netpol [Feature:SCTPConnectivity] [LinuxOnly] NetworkPolicy between server and client using SCTP should enforce policy to allow traffic only from a pod in a different namespace based on PodSelector and NamespaceSelector [Feature:NetworkPolicy] [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:NetworkPolicy": {}, + "Feature:SCTPConnectivity": {}, + "sig-network": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Netpol NetworkPolicy between server and client should support a 'default-deny-ingress' policy [Feature:NetworkPolicy] [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:NetworkPolicy": {}, + "sig-network": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Netpol NetworkPolicy between server and client should support a 'default-deny-all' policy [Feature:NetworkPolicy] [Skipped:Network/OpenShiftSDN/Multitenant] [Skipped:Network/OpenShiftSDN] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:NetworkPolicy": {}, + "sig-network": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Netpol NetworkPolicy between server and client should enforce policy to allow traffic from pods within server namespace based on PodSelector [Feature:NetworkPolicy] [Disabled:Broken] [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:NetworkPolicy": {}, + "sig-network": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Netpol NetworkPolicy between server and client should enforce policy to allow ingress traffic for a target [Feature:NetworkPolicy] [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:NetworkPolicy": {}, + "sig-network": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + 
"memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Netpol NetworkPolicy between server and client should enforce policy to allow ingress traffic from pods in all namespaces [Feature:NetworkPolicy] [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:NetworkPolicy": {}, + "sig-network": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Netpol NetworkPolicy between server and client should enforce policy to allow traffic only from a different namespace, based on NamespaceSelector [Feature:NetworkPolicy] [Disabled:Broken] [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:NetworkPolicy": {}, + "sig-network": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Netpol NetworkPolicy between server and client should enforce policy based on PodSelector with MatchExpressions [Feature:NetworkPolicy] [Disabled:Broken] [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:NetworkPolicy": {}, + "sig-network": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Netpol NetworkPolicy between server and client should enforce policy based on NamespaceSelector with MatchExpressions [Feature:NetworkPolicy] [Disabled:Broken] [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:NetworkPolicy": {}, + "sig-network": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Netpol NetworkPolicy between server and client should enforce policy based on PodSelector or NamespaceSelector [Feature:NetworkPolicy] [Disabled:Broken] [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:NetworkPolicy": {}, + "sig-network": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Netpol NetworkPolicy between server and client should enforce policy based on PodSelector and NamespaceSelector [Feature:NetworkPolicy] [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:NetworkPolicy": {}, + "sig-network": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Netpol 
NetworkPolicy between server and client should enforce policy based on Multiple PodSelectors and NamespaceSelectors [Feature:NetworkPolicy] [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:NetworkPolicy": {}, + "sig-network": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Netpol NetworkPolicy between server and client should enforce policy based on any PodSelectors [Feature:NetworkPolicy] [Disabled:Broken] [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:NetworkPolicy": {}, + "sig-network": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Netpol NetworkPolicy between server and client should enforce policy to allow traffic only from a pod in a different namespace based on PodSelector and NamespaceSelector [Feature:NetworkPolicy] [Disabled:Broken] [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:NetworkPolicy": {}, + "sig-network": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Netpol NetworkPolicy between server and client should enforce policy based on Ports [Feature:NetworkPolicy] [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:NetworkPolicy": {}, + "sig-network": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Netpol NetworkPolicy between server and client should enforce multiple, stacked policies with overlapping podSelectors [Feature:NetworkPolicy] [Disabled:Broken] [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:NetworkPolicy": {}, + "sig-network": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Netpol NetworkPolicy between server and client should support allow-all policy [Feature:NetworkPolicy] [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:NetworkPolicy": {}, + "sig-network": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Netpol NetworkPolicy between server and client should allow ingress access on one named port [Feature:NetworkPolicy] [Skipped:Network/OVNKubernetes] [Skipped:Network/OpenShiftSDN/Multitenant] 
[Skipped:Network/OpenShiftSDN] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:NetworkPolicy": {}, + "sig-network": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Netpol NetworkPolicy between server and client should allow ingress access from namespace on one named port [Feature:NetworkPolicy] [Skipped:Network/OVNKubernetes] [Skipped:Network/OpenShiftSDN/Multitenant] [Skipped:Network/OpenShiftSDN] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:NetworkPolicy": {}, + "sig-network": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Netpol NetworkPolicy between server and client should allow egress access on one named port [Feature:NetworkPolicy] [Skipped:Network/OVNKubernetes] [Skipped:Network/OpenShiftSDN/Multitenant] [Skipped:Network/OpenShiftSDN] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:NetworkPolicy": {}, + "sig-network": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Netpol NetworkPolicy between server and client should enforce updated policy [Feature:NetworkPolicy] [Disabled:Broken] [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:NetworkPolicy": {}, + "sig-network": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Netpol NetworkPolicy between server and client should allow ingress access from updated namespace [Feature:NetworkPolicy] [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:NetworkPolicy": {}, + "sig-network": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Netpol NetworkPolicy between server and client should allow ingress access from updated pod [Feature:NetworkPolicy] [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:NetworkPolicy": {}, + "sig-network": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Netpol NetworkPolicy between server and client should deny ingress from pods on other namespaces [Feature:NetworkPolicy] [Disabled:Broken] [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:NetworkPolicy": {}, + 
"sig-network": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Netpol NetworkPolicy between server and client should deny ingress access to updated pod [Feature:NetworkPolicy] [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:NetworkPolicy": {}, + "sig-network": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Netpol NetworkPolicy between server and client should deny egress from pods based on PodSelector [Feature:NetworkPolicy] [Skipped:Network/OpenShiftSDN/Multitenant] [Skipped:Network/OpenShiftSDN] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:NetworkPolicy": {}, + "sig-network": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Netpol NetworkPolicy between server and client should deny egress from all pods in a namespace [Feature:NetworkPolicy] [Skipped:Network/OpenShiftSDN/Multitenant] [Skipped:Network/OpenShiftSDN] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:NetworkPolicy": {}, + "sig-network": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Netpol NetworkPolicy between server and client should work with Ingress, Egress specified together [Feature:NetworkPolicy] [Skipped:Network/OpenShiftSDN/Multitenant] [Skipped:Network/OpenShiftSDN] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:NetworkPolicy": {}, + "sig-network": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Netpol NetworkPolicy between server and client should support denying of egress traffic on the client side (even if the server explicitly allows this traffic) [Feature:NetworkPolicy] [Skipped:Network/OpenShiftSDN/Multitenant] [Skipped:Network/OpenShiftSDN] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:NetworkPolicy": {}, + "sig-network": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Netpol NetworkPolicy between server and client should enforce egress policy allowing traffic to a server in a different namespace based on PodSelector and NamespaceSelector [Feature:NetworkPolicy] [Skipped:Network/OpenShiftSDN/Multitenant] [Skipped:Network/OpenShiftSDN] [Suite:openshift/conformance/parallel] [Suite:k8s]", + 
"otherNames": null, + "labels": { + "Feature:NetworkPolicy": {}, + "sig-network": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Netpol NetworkPolicy between server and client should enforce ingress policy allowing any port traffic to a server on a specific protocol [Feature:NetworkPolicy] [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:NetworkPolicy": {}, + "sig-network": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Netpol NetworkPolicy between server and client should enforce multiple ingress policies with ingress allow-all policy taking precedence [Feature:NetworkPolicy] [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:NetworkPolicy": {}, + "sig-network": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Netpol NetworkPolicy between server and client should enforce multiple egress policies with egress allow-all policy taking precedence [Feature:NetworkPolicy] [Skipped:Network/OpenShiftSDN/Multitenant] [Skipped:Network/OpenShiftSDN] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:NetworkPolicy": {}, + "sig-network": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Netpol NetworkPolicy between server and client should stop enforcing policies after they are deleted [Feature:NetworkPolicy] [Skipped:Network/OpenShiftSDN/Multitenant] [Skipped:Network/OpenShiftSDN] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:NetworkPolicy": {}, + "sig-network": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Netpol NetworkPolicy between server and client should allow egress access to server in CIDR block [Feature:NetworkPolicy] [Skipped:Network/OpenShiftSDN/Multitenant] [Skipped:Network/OpenShiftSDN] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:NetworkPolicy": {}, + "sig-network": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Netpol NetworkPolicy between server and client should enforce except clause while egress access to server in CIDR block [Feature:NetworkPolicy] [Skipped:Network/OpenShiftSDN/Multitenant] [Skipped:Network/OpenShiftSDN] 
[Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:NetworkPolicy": {}, + "sig-network": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Netpol NetworkPolicy between server and client should ensure an IP overlapping both IPBlock.CIDR and IPBlock.Except is allowed [Feature:NetworkPolicy] [Skipped:Network/OpenShiftSDN/Multitenant] [Skipped:Network/OpenShiftSDN] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:NetworkPolicy": {}, + "sig-network": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Netpol NetworkPolicy between server and client should enforce policies to check ingress and egress policies can be controlled independently based on PodSelector [Feature:NetworkPolicy] [Skipped:Network/OpenShiftSDN/Multitenant] [Skipped:Network/OpenShiftSDN] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:NetworkPolicy": {}, + "sig-network": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Netpol NetworkPolicy between server and client should not mistakenly treat 'protocol: SCTP' as 'protocol: TCP', even if the plugin doesn't support SCTP [Feature:NetworkPolicy] [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:NetworkPolicy": {}, + "sig-network": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Netpol NetworkPolicy between server and client should properly isolate pods that are selected by a policy allowing SCTP, even if the plugin doesn't support SCTP [Feature:NetworkPolicy] [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:NetworkPolicy": {}, + "sig-network": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Netpol NetworkPolicy between server and client should not allow access by TCP when a policy specifies only UDP [Feature:NetworkPolicy] [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:NetworkPolicy": {}, + "sig-network": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Netpol NetworkPolicy between server and client should enforce policy to allow traffic based on 
NamespaceSelector with MatchLabels using default ns label [Feature:NetworkPolicy] [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:NetworkPolicy": {}, + "sig-network": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Netpol NetworkPolicy between server and client should enforce policy based on NamespaceSelector with MatchExpressions using default ns label [Feature:NetworkPolicy] [Disabled:Broken] [Skipped:Network/OpenShiftSDN/Multitenant] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:NetworkPolicy": {}, + "sig-network": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] DRA [Feature:DynamicResourceAllocation] kubelet registers plugin [Disabled:Alpha] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:DynamicResourceAllocation": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] DRA [Feature:DynamicResourceAllocation] kubelet must retry NodePrepareResources [Disabled:Alpha] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:DynamicResourceAllocation": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] DRA [Feature:DynamicResourceAllocation] kubelet must not run a pod if a claim is not ready [Disabled:Alpha] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:DynamicResourceAllocation": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] DRA [Feature:DynamicResourceAllocation] kubelet must unprepare resources for force-deleted pod [Disabled:Alpha] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:DynamicResourceAllocation": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] DRA [Feature:DynamicResourceAllocation] kubelet must call NodePrepareResources even if not used by any container [Disabled:Alpha] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:DynamicResourceAllocation": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] DRA [Feature:DynamicResourceAllocation] kubelet must map configs and devices to the right containers [Disabled:Alpha] [Suite:k8s]", + 
"otherNames": null, + "labels": { + "Feature:DynamicResourceAllocation": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] DRA [Feature:DynamicResourceAllocation] with classic DRA [Feature:DRAControlPlaneController] on single node supports claim and class parameters [Disabled:Alpha] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:DRAControlPlaneController": {}, + "Feature:DynamicResourceAllocation": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] DRA [Feature:DynamicResourceAllocation] with classic DRA [Feature:DRAControlPlaneController] on single node supports reusing resources [Disabled:Alpha] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:DRAControlPlaneController": {}, + "Feature:DynamicResourceAllocation": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] DRA [Feature:DynamicResourceAllocation] with classic DRA [Feature:DRAControlPlaneController] on single node supports sharing a claim concurrently [Disabled:Alpha] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:DRAControlPlaneController": {}, + "Feature:DynamicResourceAllocation": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] DRA [Feature:DynamicResourceAllocation] with classic DRA [Feature:DRAControlPlaneController] on single node supports sharing a claim sequentially [Slow] [Disabled:Alpha] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:DRAControlPlaneController": {}, + "Feature:DynamicResourceAllocation": {}, + "Slow": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] DRA [Feature:DynamicResourceAllocation] with classic DRA [Feature:DRAControlPlaneController] on single node retries pod scheduling after creating device class [Disabled:Alpha] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:DRAControlPlaneController": {}, + "Feature:DynamicResourceAllocation": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] DRA [Feature:DynamicResourceAllocation] with classic DRA [Feature:DRAControlPlaneController] on single node retries pod scheduling after updating device class [Disabled:Alpha] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:DRAControlPlaneController": {}, + "Feature:DynamicResourceAllocation": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + 
"isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] DRA [Feature:DynamicResourceAllocation] with classic DRA [Feature:DRAControlPlaneController] on single node runs a pod without a generated resource claim [Disabled:Alpha] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:DRAControlPlaneController": {}, + "Feature:DynamicResourceAllocation": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] DRA [Feature:DynamicResourceAllocation] with classic DRA [Feature:DRAControlPlaneController] on single node supports simple pod referencing inline resource claim [Disabled:Alpha] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:DRAControlPlaneController": {}, + "Feature:DynamicResourceAllocation": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] DRA [Feature:DynamicResourceAllocation] with classic DRA [Feature:DRAControlPlaneController] on single node supports inline claim referenced by multiple containers [Disabled:Alpha] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:DRAControlPlaneController": {}, + "Feature:DynamicResourceAllocation": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] DRA [Feature:DynamicResourceAllocation] with classic DRA [Feature:DRAControlPlaneController] on single node supports simple pod referencing external resource claim [Disabled:Alpha] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:DRAControlPlaneController": {}, + "Feature:DynamicResourceAllocation": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] DRA [Feature:DynamicResourceAllocation] with classic DRA [Feature:DRAControlPlaneController] on single node supports external claim referenced by multiple pods [Disabled:Alpha] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:DRAControlPlaneController": {}, + "Feature:DynamicResourceAllocation": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] DRA [Feature:DynamicResourceAllocation] with classic DRA [Feature:DRAControlPlaneController] on single node supports external claim referenced by multiple containers of multiple pods [Disabled:Alpha] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:DRAControlPlaneController": {}, + "Feature:DynamicResourceAllocation": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + 
"memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] DRA [Feature:DynamicResourceAllocation] with classic DRA [Feature:DRAControlPlaneController] on single node supports init containers [Disabled:Alpha] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:DRAControlPlaneController": {}, + "Feature:DynamicResourceAllocation": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] DRA [Feature:DynamicResourceAllocation] with classic DRA [Feature:DRAControlPlaneController] on single node removes reservation from claim when pod is done [Disabled:Alpha] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:DRAControlPlaneController": {}, + "Feature:DynamicResourceAllocation": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] DRA [Feature:DynamicResourceAllocation] with classic DRA [Feature:DRAControlPlaneController] on single node deletes generated claims when pod is done [Disabled:Alpha] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:DRAControlPlaneController": {}, + "Feature:DynamicResourceAllocation": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] DRA [Feature:DynamicResourceAllocation] with classic DRA [Feature:DRAControlPlaneController] on single node does not delete generated claims when pod is restarting [Disabled:Alpha] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:DRAControlPlaneController": {}, + "Feature:DynamicResourceAllocation": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] DRA [Feature:DynamicResourceAllocation] with classic DRA [Feature:DRAControlPlaneController] on single node must deallocate after use [Disabled:Alpha] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:DRAControlPlaneController": {}, + "Feature:DynamicResourceAllocation": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] DRA [Feature:DynamicResourceAllocation] with classic DRA [Feature:DRAControlPlaneController] on multiple nodes with network-attached resources schedules onto different nodes [Disabled:Alpha] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:DRAControlPlaneController": {}, + "Feature:DynamicResourceAllocation": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + 
"name": "[sig-node] DRA [Feature:DynamicResourceAllocation] with classic DRA [Feature:DRAControlPlaneController] on multiple nodes with network-attached resources [Serial] [Disruptive] [Slow] must deallocate on non graceful node shutdown [Disabled:Alpha] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Feature:DRAControlPlaneController": {}, + "Feature:DynamicResourceAllocation": {}, + "Serial": {}, + "Slow": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] DRA [Feature:DynamicResourceAllocation] with classic DRA [Feature:DRAControlPlaneController] on multiple nodes reallocation works [Disabled:Alpha] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:DRAControlPlaneController": {}, + "Feature:DynamicResourceAllocation": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] DRA [Feature:DynamicResourceAllocation] with classic DRA [Feature:DRAControlPlaneController] on multiple nodes with node-local resources uses all resources [Disabled:Alpha] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:DRAControlPlaneController": {}, + "Feature:DynamicResourceAllocation": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] DRA [Feature:DynamicResourceAllocation] with structured parameters on single node supports claim and class parameters [Disabled:Alpha] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:DynamicResourceAllocation": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] DRA [Feature:DynamicResourceAllocation] with structured parameters on single node supports reusing resources [Disabled:Alpha] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:DynamicResourceAllocation": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] DRA [Feature:DynamicResourceAllocation] with structured parameters on single node supports sharing a claim concurrently [Disabled:Alpha] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:DynamicResourceAllocation": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] DRA [Feature:DynamicResourceAllocation] with structured parameters on single node supports sharing a claim sequentially [Slow] [Disabled:Alpha] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:DynamicResourceAllocation": {}, + "Slow": {}, + 
"sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] DRA [Feature:DynamicResourceAllocation] with structured parameters on single node retries pod scheduling after creating device class [Disabled:Alpha] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:DynamicResourceAllocation": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] DRA [Feature:DynamicResourceAllocation] with structured parameters on single node retries pod scheduling after updating device class [Disabled:Alpha] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:DynamicResourceAllocation": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] DRA [Feature:DynamicResourceAllocation] with structured parameters on single node runs a pod without a generated resource claim [Disabled:Alpha] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:DynamicResourceAllocation": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] DRA [Feature:DynamicResourceAllocation] with structured parameters on single node supports simple pod referencing inline resource claim [Disabled:Alpha] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:DynamicResourceAllocation": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] DRA [Feature:DynamicResourceAllocation] with structured parameters on single node supports inline claim referenced by multiple containers [Disabled:Alpha] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:DynamicResourceAllocation": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] DRA [Feature:DynamicResourceAllocation] with structured parameters on single node supports simple pod referencing external resource claim [Disabled:Alpha] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:DynamicResourceAllocation": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] DRA [Feature:DynamicResourceAllocation] with structured parameters on single node supports external claim referenced by multiple pods [Disabled:Alpha] [Suite:k8s]", + "otherNames": null, + "labels": { + 
"Feature:DynamicResourceAllocation": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] DRA [Feature:DynamicResourceAllocation] with structured parameters on single node supports external claim referenced by multiple containers of multiple pods [Disabled:Alpha] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:DynamicResourceAllocation": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] DRA [Feature:DynamicResourceAllocation] with structured parameters on single node supports init containers [Disabled:Alpha] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:DynamicResourceAllocation": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] DRA [Feature:DynamicResourceAllocation] with structured parameters on single node removes reservation from claim when pod is done [Disabled:Alpha] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:DynamicResourceAllocation": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] DRA [Feature:DynamicResourceAllocation] with structured parameters on single node deletes generated claims when pod is done [Disabled:Alpha] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:DynamicResourceAllocation": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] DRA [Feature:DynamicResourceAllocation] with structured parameters on single node does not delete generated claims when pod is restarting [Disabled:Alpha] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:DynamicResourceAllocation": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] DRA [Feature:DynamicResourceAllocation] with structured parameters on single node must deallocate after use [Disabled:Alpha] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:DynamicResourceAllocation": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] DRA [Feature:DynamicResourceAllocation] with structured parameters on multiple nodes with different ResourceSlices keeps pod pending because of CEL runtime errors [Disabled:Alpha] [Suite:k8s]", + "otherNames": null, + "labels": { + 
"Feature:DynamicResourceAllocation": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] DRA [Feature:DynamicResourceAllocation] with structured parameters on multiple nodes with node-local resources uses all resources [Disabled:Alpha] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:DynamicResourceAllocation": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] DRA [Feature:DynamicResourceAllocation] cluster support validating admission policy for admin access [Disabled:Alpha] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:DynamicResourceAllocation": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] DRA [Feature:DynamicResourceAllocation] cluster truncates the name of a generated resource claim [Disabled:Alpha] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:DynamicResourceAllocation": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] DRA [Feature:DynamicResourceAllocation] cluster supports count/resourceclaims.resource.k8s.io ResourceQuota [Disabled:Alpha] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:DynamicResourceAllocation": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] DRA [Feature:DynamicResourceAllocation] cluster with classic DRA [Feature:DRAControlPlaneController] with setting ReservedFor supports scheduled pod referencing inline resource claim [Disabled:Alpha] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:DRAControlPlaneController": {}, + "Feature:DynamicResourceAllocation": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] DRA [Feature:DynamicResourceAllocation] cluster with classic DRA [Feature:DRAControlPlaneController] with setting ReservedFor supports scheduled pod referencing external resource claim [Disabled:Alpha] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:DRAControlPlaneController": {}, + "Feature:DynamicResourceAllocation": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] DRA [Feature:DynamicResourceAllocation] cluster with classic DRA [Feature:DRAControlPlaneController] with 
setting ReservedFor supports simple pod referencing inline resource claim [Disabled:Alpha] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:DRAControlPlaneController": {}, + "Feature:DynamicResourceAllocation": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] DRA [Feature:DynamicResourceAllocation] cluster with classic DRA [Feature:DRAControlPlaneController] with setting ReservedFor supports inline claim referenced by multiple containers [Disabled:Alpha] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:DRAControlPlaneController": {}, + "Feature:DynamicResourceAllocation": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] DRA [Feature:DynamicResourceAllocation] cluster with classic DRA [Feature:DRAControlPlaneController] with setting ReservedFor supports simple pod referencing external resource claim [Disabled:Alpha] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:DRAControlPlaneController": {}, + "Feature:DynamicResourceAllocation": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] DRA [Feature:DynamicResourceAllocation] cluster with classic DRA [Feature:DRAControlPlaneController] with setting ReservedFor supports external claim referenced by multiple pods [Disabled:Alpha] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:DRAControlPlaneController": {}, + "Feature:DynamicResourceAllocation": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] DRA [Feature:DynamicResourceAllocation] cluster with classic DRA [Feature:DRAControlPlaneController] with setting ReservedFor supports external claim referenced by multiple containers of multiple pods [Disabled:Alpha] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:DRAControlPlaneController": {}, + "Feature:DynamicResourceAllocation": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] DRA [Feature:DynamicResourceAllocation] cluster with classic DRA [Feature:DRAControlPlaneController] with setting ReservedFor supports init containers [Disabled:Alpha] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:DRAControlPlaneController": {}, + "Feature:DynamicResourceAllocation": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] DRA [Feature:DynamicResourceAllocation] cluster with classic DRA 
[Feature:DRAControlPlaneController] with setting ReservedFor removes reservation from claim when pod is done [Disabled:Alpha] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:DRAControlPlaneController": {}, + "Feature:DynamicResourceAllocation": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] DRA [Feature:DynamicResourceAllocation] cluster with classic DRA [Feature:DRAControlPlaneController] with setting ReservedFor deletes generated claims when pod is done [Disabled:Alpha] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:DRAControlPlaneController": {}, + "Feature:DynamicResourceAllocation": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] DRA [Feature:DynamicResourceAllocation] cluster with classic DRA [Feature:DRAControlPlaneController] with setting ReservedFor does not delete generated claims when pod is restarting [Disabled:Alpha] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:DRAControlPlaneController": {}, + "Feature:DynamicResourceAllocation": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] DRA [Feature:DynamicResourceAllocation] cluster with classic DRA [Feature:DRAControlPlaneController] with setting ReservedFor must deallocate after use [Disabled:Alpha] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:DRAControlPlaneController": {}, + "Feature:DynamicResourceAllocation": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] DRA [Feature:DynamicResourceAllocation] cluster with classic DRA [Feature:DRAControlPlaneController] without setting ReservedFor supports scheduled pod referencing inline resource claim [Disabled:Alpha] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:DRAControlPlaneController": {}, + "Feature:DynamicResourceAllocation": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] DRA [Feature:DynamicResourceAllocation] cluster with classic DRA [Feature:DRAControlPlaneController] without setting ReservedFor supports scheduled pod referencing external resource claim [Disabled:Alpha] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:DRAControlPlaneController": {}, + "Feature:DynamicResourceAllocation": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] DRA [Feature:DynamicResourceAllocation] cluster with classic 
DRA [Feature:DRAControlPlaneController] without setting ReservedFor supports simple pod referencing inline resource claim [Disabled:Alpha] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:DRAControlPlaneController": {}, + "Feature:DynamicResourceAllocation": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] DRA [Feature:DynamicResourceAllocation] cluster with classic DRA [Feature:DRAControlPlaneController] without setting ReservedFor supports inline claim referenced by multiple containers [Disabled:Alpha] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:DRAControlPlaneController": {}, + "Feature:DynamicResourceAllocation": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] DRA [Feature:DynamicResourceAllocation] cluster with classic DRA [Feature:DRAControlPlaneController] without setting ReservedFor supports simple pod referencing external resource claim [Disabled:Alpha] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:DRAControlPlaneController": {}, + "Feature:DynamicResourceAllocation": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] DRA [Feature:DynamicResourceAllocation] cluster with classic DRA [Feature:DRAControlPlaneController] without setting ReservedFor supports external claim referenced by multiple pods [Disabled:Alpha] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:DRAControlPlaneController": {}, + "Feature:DynamicResourceAllocation": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] DRA [Feature:DynamicResourceAllocation] cluster with classic DRA [Feature:DRAControlPlaneController] without setting ReservedFor supports external claim referenced by multiple containers of multiple pods [Disabled:Alpha] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:DRAControlPlaneController": {}, + "Feature:DynamicResourceAllocation": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] DRA [Feature:DynamicResourceAllocation] cluster with classic DRA [Feature:DRAControlPlaneController] without setting ReservedFor supports init containers [Disabled:Alpha] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:DRAControlPlaneController": {}, + "Feature:DynamicResourceAllocation": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] DRA 
[Feature:DynamicResourceAllocation] cluster with classic DRA [Feature:DRAControlPlaneController] without setting ReservedFor removes reservation from claim when pod is done [Disabled:Alpha] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:DRAControlPlaneController": {}, + "Feature:DynamicResourceAllocation": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] DRA [Feature:DynamicResourceAllocation] cluster with classic DRA [Feature:DRAControlPlaneController] without setting ReservedFor deletes generated claims when pod is done [Disabled:Alpha] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:DRAControlPlaneController": {}, + "Feature:DynamicResourceAllocation": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] DRA [Feature:DynamicResourceAllocation] cluster with classic DRA [Feature:DRAControlPlaneController] without setting ReservedFor does not delete generated claims when pod is restarting [Disabled:Alpha] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:DRAControlPlaneController": {}, + "Feature:DynamicResourceAllocation": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] DRA [Feature:DynamicResourceAllocation] cluster with classic DRA [Feature:DRAControlPlaneController] without setting ReservedFor must deallocate after use [Disabled:Alpha] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:DRAControlPlaneController": {}, + "Feature:DynamicResourceAllocation": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] DRA [Feature:DynamicResourceAllocation] cluster with structured parameters must apply per-node permission checks [Disabled:Alpha] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:DynamicResourceAllocation": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] DRA [Feature:DynamicResourceAllocation] cluster with structured parameters must manage ResourceSlices [Slow] [Disabled:Alpha] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:DynamicResourceAllocation": {}, + "Slow": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] DRA [Feature:DynamicResourceAllocation] multiple drivers using only drapbv1alpha3 work [Disabled:Alpha] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:DynamicResourceAllocation": {}, + "sig-node": 
{} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] ConfigMap should be consumable via environment variable [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "NodeConformance": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] ConfigMap should be consumable via the environment [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "NodeConformance": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] ConfigMap should fail to create ConfigMap with empty key [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] ConfigMap should update ConfigMap successfully [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] ConfigMap should run through a ConfigMap lifecycle [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] ConfigMap should be consumable as environment variable names when configmap keys start with a digit [Feature:RelaxedEnvironmentVariableValidation] [Disabled:Alpha] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:RelaxedEnvironmentVariableValidation": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Probing container with readiness probe should not be ready before initial delay and never restart [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "NodeConformance": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", 
+ "lifecycle": "blocking" + }, + { + "name": "[sig-node] Probing container with readiness probe that fails should never be ready and never restart [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "NodeConformance": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Probing container should be restarted with a exec \"cat /tmp/health\" liveness probe [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "NodeConformance": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Probing container should *not* be restarted with a exec \"cat /tmp/health\" liveness probe [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "NodeConformance": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Probing container should be restarted with a /healthz http liveness probe [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "NodeConformance": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Probing container should *not* be restarted with a tcp:8080 liveness probe [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "NodeConformance": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Probing container should have monotonically increasing restart count [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "NodeConformance": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Probing container should *not* be restarted with a /healthz http liveness probe [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "NodeConformance": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + 
"memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Probing container should be restarted with an exec liveness probe with timeout [MinimumKubeletVersion:1.20] [NodeConformance] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "NodeConformance": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Probing container should not be ready with an exec readiness probe timeout [MinimumKubeletVersion:1.20] [NodeConformance] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "NodeConformance": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Probing container should be restarted with a failing exec liveness probe that took longer than the timeout [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Probing container should be restarted with a local redirect http liveness probe [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Probing container should *not* be restarted with a non-local redirect http liveness probe [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Probing container should be restarted startup probe fails [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Probing container should *not* be restarted by liveness probe because startup probe delays it [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Probing container should be restarted by liveness probe after startup probe enables it [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-node": {} + 
}, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Probing container should be ready immediately after startupProbe succeeds [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Probing container should override timeoutGracePeriodSeconds when LivenessProbe field is set [NodeConformance] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "NodeConformance": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Probing container should override timeoutGracePeriodSeconds when StartupProbe field is set [NodeConformance] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "NodeConformance": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Probing container should *not* be restarted with a GRPC liveness probe [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "NodeConformance": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Probing container should be restarted with a GRPC liveness probe [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "NodeConformance": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Probing container should mark readiness on pods to false while pod is in progress of terminating when a pod has a readiness probe [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Probing container should mark readiness on pods to false and disable liveness probes while pod is in progress of terminating [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": 
"openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] [NodeFeature:SidecarContainers] [Feature:SidecarContainers] Probing restartable init container with readiness probe should not be ready before initial delay and never restart [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:SidecarContainers": {}, + "NodeFeature:SidecarContainers": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] [NodeFeature:SidecarContainers] [Feature:SidecarContainers] Probing restartable init container with readiness probe that fails should never be ready and never restart [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:SidecarContainers": {}, + "NodeFeature:SidecarContainers": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] [NodeFeature:SidecarContainers] [Feature:SidecarContainers] Probing restartable init container should be restarted with a exec \"cat /tmp/health\" liveness probe [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:SidecarContainers": {}, + "NodeFeature:SidecarContainers": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] [NodeFeature:SidecarContainers] [Feature:SidecarContainers] Probing restartable init container should *not* be restarted with a exec \"cat /tmp/health\" liveness probe [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:SidecarContainers": {}, + "NodeFeature:SidecarContainers": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] [NodeFeature:SidecarContainers] [Feature:SidecarContainers] Probing restartable init container should be restarted with a /healthz http liveness probe [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:SidecarContainers": {}, + "NodeFeature:SidecarContainers": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] [NodeFeature:SidecarContainers] [Feature:SidecarContainers] Probing restartable init container should *not* be restarted with a tcp:8080 liveness probe [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:SidecarContainers": {}, + "NodeFeature:SidecarContainers": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": 
"openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] [NodeFeature:SidecarContainers] [Feature:SidecarContainers] Probing restartable init container should have monotonically increasing restart count [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:SidecarContainers": {}, + "NodeFeature:SidecarContainers": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] [NodeFeature:SidecarContainers] [Feature:SidecarContainers] Probing restartable init container should *not* be restarted with a /healthz http liveness probe [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:SidecarContainers": {}, + "NodeFeature:SidecarContainers": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] [NodeFeature:SidecarContainers] [Feature:SidecarContainers] Probing restartable init container should be restarted with an exec liveness probe with timeout [MinimumKubeletVersion:1.20] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:SidecarContainers": {}, + "NodeFeature:SidecarContainers": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] [NodeFeature:SidecarContainers] [Feature:SidecarContainers] Probing restartable init container should not be ready with an exec readiness probe timeout [MinimumKubeletVersion:1.20] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:SidecarContainers": {}, + "NodeFeature:SidecarContainers": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] [NodeFeature:SidecarContainers] [Feature:SidecarContainers] Probing restartable init container should be restarted with a failing exec liveness probe that took longer than the timeout [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:SidecarContainers": {}, + "NodeFeature:SidecarContainers": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] [NodeFeature:SidecarContainers] [Feature:SidecarContainers] Probing restartable init container should be restarted with a local redirect http liveness probe [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:SidecarContainers": {}, + "NodeFeature:SidecarContainers": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + 
"timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] [NodeFeature:SidecarContainers] [Feature:SidecarContainers] Probing restartable init container should *not* be restarted with a non-local redirect http liveness probe [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:SidecarContainers": {}, + "NodeFeature:SidecarContainers": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] [NodeFeature:SidecarContainers] [Feature:SidecarContainers] Probing restartable init container should be restarted startup probe fails [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:SidecarContainers": {}, + "NodeFeature:SidecarContainers": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] [NodeFeature:SidecarContainers] [Feature:SidecarContainers] Probing restartable init container should *not* be restarted by liveness probe because startup probe delays it [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:SidecarContainers": {}, + "NodeFeature:SidecarContainers": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] [NodeFeature:SidecarContainers] [Feature:SidecarContainers] Probing restartable init container should be restarted by liveness probe after startup probe enables it [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:SidecarContainers": {}, + "NodeFeature:SidecarContainers": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] [NodeFeature:SidecarContainers] [Feature:SidecarContainers] Probing restartable init container should be ready immediately after startupProbe succeeds [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:SidecarContainers": {}, + "NodeFeature:SidecarContainers": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] [NodeFeature:SidecarContainers] [Feature:SidecarContainers] Probing restartable init container should override timeoutGracePeriodSeconds when LivenessProbe field is set [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:SidecarContainers": {}, + "NodeFeature:SidecarContainers": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + 
"source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] [NodeFeature:SidecarContainers] [Feature:SidecarContainers] Probing restartable init container should override timeoutGracePeriodSeconds when StartupProbe field is set [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:SidecarContainers": {}, + "NodeFeature:SidecarContainers": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] [NodeFeature:SidecarContainers] [Feature:SidecarContainers] Probing restartable init container should *not* be restarted with a GRPC liveness probe [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:SidecarContainers": {}, + "NodeFeature:SidecarContainers": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] [NodeFeature:SidecarContainers] [Feature:SidecarContainers] Probing restartable init container should be restarted with a GRPC liveness probe [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:SidecarContainers": {}, + "NodeFeature:SidecarContainers": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] [NodeFeature:SidecarContainers] [Feature:SidecarContainers] Probing restartable init container should mark readiness on pods to false while pod is in progress of terminating when a pod has a readiness probe [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:SidecarContainers": {}, + "NodeFeature:SidecarContainers": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] [NodeFeature:SidecarContainers] [Feature:SidecarContainers] Probing restartable init container should mark readiness on pods to false and disable liveness probes while pod is in progress of terminating [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:SidecarContainers": {}, + "NodeFeature:SidecarContainers": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Containers should use the image defaults if command and args are blank [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "NodeConformance": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": 
"openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Containers should be able to override the image's default arguments (container cmd) [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "NodeConformance": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Containers should be able to override the image's default command (container entrypoint) [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "NodeConformance": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Containers should be able to override the image's default command and arguments [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "NodeConformance": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Downward API should provide pod name, namespace and IP address as env vars [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "NodeConformance": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Downward API should provide host IP as an env var [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "NodeConformance": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Downward API should provide host IP and pod IP as an env var if pod uses host network [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Downward API should provide container's limits.cpu/memory and requests.cpu/memory as env vars [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "NodeConformance": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + 
"source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Downward API should provide default limits.cpu/memory from node allocatable [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "NodeConformance": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Downward API should provide pod UID as env vars [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "NodeConformance": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Downward API [Serial] [Disruptive] [NodeFeature:DownwardAPIHugePages] Downward API tests for hugepages should provide container's limits.hugepages-\u003cpagesize\u003e and requests.hugepages-\u003cpagesize\u003e as env vars [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "NodeFeature:DownwardAPIHugePages": {}, + "Serial": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Downward API [Serial] [Disruptive] [NodeFeature:DownwardAPIHugePages] Downward API tests for hugepages should provide default limits.hugepages-\u003cpagesize\u003e from node allocatable [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "NodeFeature:DownwardAPIHugePages": {}, + "Serial": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Ephemeral Containers [NodeConformance] will start an ephemeral container in an existing pod [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "NodeConformance": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Ephemeral Containers [NodeConformance] should update the ephemeral containers in an existing pod [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "NodeConformance": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Variable Expansion should allow composing env vars into new env vars [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + 
"NodeConformance": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Variable Expansion should allow substituting values in a container's command [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "NodeConformance": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Variable Expansion should allow substituting values in a container's args [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "NodeConformance": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Variable Expansion should allow substituting values in a volume subpath [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Variable Expansion should fail substituting values in a volume subpath with backticks [Slow] [Conformance] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "Slow": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Variable Expansion should fail substituting values in a volume subpath with absolute path [Slow] [Conformance] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "Slow": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Variable Expansion should verify that a failing subpath expansion can be modified during the lifecycle of a container [Slow] [Conformance] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "Slow": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Variable Expansion should succeed in writing subpaths in container [Slow] [Conformance] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "Slow": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": 
"openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Variable Expansion allow almost all printable ASCII characters as environment variable names [Feature:RelaxedEnvironmentVariableValidation] [Disabled:Alpha] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:RelaxedEnvironmentVariableValidation": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] ImageCredentialProvider [Feature:KubeletCredentialProviders] should be able to create pod with image credentials fetched from external credential provider [Disabled:RebaseInProgress] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:KubeletCredentialProviders": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] InitContainer [NodeConformance] should invoke init containers on a RestartNever pod [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "NodeConformance": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] InitContainer [NodeConformance] should invoke init containers on a RestartAlways pod [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "NodeConformance": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] InitContainer [NodeConformance] should not start app containers if init containers fail on a RestartAlways pod [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "NodeConformance": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] InitContainer [NodeConformance] should not start app containers and fail the pod if init containers fail on a RestartNever pod [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "NodeConformance": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Kubelet when scheduling a busybox command in a pod should print the output to logs [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "NodeConformance": {}, + "sig-node": {} + }, + "tags": null, + 
"resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Kubelet when scheduling a busybox command that always fails in a pod should have an terminated reason [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "NodeConformance": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Kubelet when scheduling a busybox command that always fails in a pod should be possible to delete [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "NodeConformance": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Kubelet when scheduling an agnhost Pod with hostAliases should write entries to /etc/hosts [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "NodeConformance": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Kubelet when scheduling a read only busybox container should not write to root filesystem [LinuxOnly] [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "NodeConformance": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Kubelet with pods in a privileged namespace when scheduling an agnhost Pod with hostAliases and hostNetwork should write entries to /etc/hosts when hostNetwork is enabled [NodeConformance] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "NodeConformance": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] KubeletManagedEtcHosts should test kubelet managed /etc/hosts file [LinuxOnly] [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "NodeConformance": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Lease lease API should be available [Conformance] 
[Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Container Lifecycle Hook when create a pod with lifecycle hook should execute poststart exec hook properly [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "NodeConformance": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Container Lifecycle Hook when create a pod with lifecycle hook should execute prestop exec hook properly [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "NodeConformance": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Container Lifecycle Hook when create a pod with lifecycle hook should execute poststart http hook properly [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "NodeConformance": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Container Lifecycle Hook when create a pod with lifecycle hook should execute poststart https hook properly [MinimumKubeletVersion:1.23] [NodeConformance] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "NodeConformance": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Container Lifecycle Hook when create a pod with lifecycle hook should execute prestop http hook properly [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "NodeConformance": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Container Lifecycle Hook when create a pod with lifecycle hook should execute prestop https hook properly [MinimumKubeletVersion:1.23] [NodeConformance] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "NodeConformance": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": 
"openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] [NodeFeature:SidecarContainers] [Feature:SidecarContainers] Restartable Init Container Lifecycle Hook when create a pod with lifecycle hook should execute poststart exec hook properly [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:SidecarContainers": {}, + "NodeFeature:SidecarContainers": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] [NodeFeature:SidecarContainers] [Feature:SidecarContainers] Restartable Init Container Lifecycle Hook when create a pod with lifecycle hook should execute prestop exec hook properly [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:SidecarContainers": {}, + "NodeFeature:SidecarContainers": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] [NodeFeature:SidecarContainers] [Feature:SidecarContainers] Restartable Init Container Lifecycle Hook when create a pod with lifecycle hook should execute poststart http hook properly [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:SidecarContainers": {}, + "NodeFeature:SidecarContainers": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] [NodeFeature:SidecarContainers] [Feature:SidecarContainers] Restartable Init Container Lifecycle Hook when create a pod with lifecycle hook should execute poststart https hook properly [MinimumKubeletVersion:1.23] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:SidecarContainers": {}, + "NodeFeature:SidecarContainers": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] [NodeFeature:SidecarContainers] [Feature:SidecarContainers] Restartable Init Container Lifecycle Hook when create a pod with lifecycle hook should execute prestop http hook properly [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:SidecarContainers": {}, + "NodeFeature:SidecarContainers": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] [NodeFeature:SidecarContainers] [Feature:SidecarContainers] Restartable Init Container Lifecycle Hook when create a pod with lifecycle hook should execute prestop https hook properly [MinimumKubeletVersion:1.23] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:SidecarContainers": {}, + "NodeFeature:SidecarContainers": {}, + 
"sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] [Feature:PodLifecycleSleepAction] when create a pod with lifecycle hook using sleep action valid prestop hook using sleep action [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:PodLifecycleSleepAction": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] [Feature:PodLifecycleSleepAction] when create a pod with lifecycle hook using sleep action reduce GracePeriodSeconds during runtime [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:PodLifecycleSleepAction": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] [Feature:PodLifecycleSleepAction] when create a pod with lifecycle hook using sleep action ignore terminated container [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:PodLifecycleSleepAction": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] NodeLease NodeLease the kubelet should create and update a lease in the kube-node-lease namespace [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] NodeLease NodeLease should have OwnerReferences set [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] NodeLease NodeLease the kubelet should report node status infrequently [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] PodOSRejection [NodeConformance] Kubelet should reject pod when the node OS doesn't match pod's OS [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "NodeConformance": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + 
"name": "[sig-node] Pods should get a host IP [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "NodeConformance": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Pods should be submitted and removed [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "NodeConformance": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Pods should be updated [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "NodeConformance": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Pods should allow activeDeadlineSeconds to be updated [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "NodeConformance": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Pods should contain environment variables for services [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "NodeConformance": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Pods should support remote command execution over websockets [NodeConformance] [Conformance] [Skipped:Proxy] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "NodeConformance": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Pods should support retrieving logs from the container over websockets [NodeConformance] [Conformance] [Skipped:Proxy] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "NodeConformance": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Pods should have their auto-restart back-off timer reset on image update [Slow] 
[NodeConformance] [Suite:k8s]", + "otherNames": null, + "labels": { + "NodeConformance": {}, + "Slow": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Pods should cap back-off at MaxContainerBackOff [Slow] [NodeConformance] [Suite:k8s]", + "otherNames": null, + "labels": { + "NodeConformance": {}, + "Slow": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Pods should support pod readiness gates [NodeConformance] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "NodeConformance": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Pods should delete a collection of pods [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Pods should run through the lifecycle of Pods and PodStatus [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Pods should patch a pod status [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] PodTemplates should run the lifecycle of PodTemplates [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] PodTemplates should delete a collection of pod templates [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] PodTemplates should replace a pod template [Conformance] 
[Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] PrivilegedPod [NodeConformance] should enable privileged commands [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "NodeConformance": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Container Runtime blackbox test when starting a container that exits should run with the expected status [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "NodeConformance": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Container Runtime blackbox test on terminated container should report termination message if TerminationMessagePath is set [NodeConformance] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "NodeConformance": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Container Runtime blackbox test on terminated container should report termination message if TerminationMessagePath is set as non-root user and at a non-default path [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "NodeConformance": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Container Runtime blackbox test on terminated container should report termination message from log output if TerminationMessagePolicy FallbackToLogsOnError is set [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "NodeConformance": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Container Runtime blackbox test on terminated container should report termination message as empty when pod succeeds and TerminationMessagePolicy FallbackToLogsOnError is set [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "NodeConformance": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + 
"conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Container Runtime blackbox test on terminated container should report termination message from file when pod succeeds and TerminationMessagePolicy FallbackToLogsOnError is set [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "NodeConformance": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Container Runtime blackbox test when running a container with a new image should not be able to pull image from invalid registry [NodeConformance] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "NodeConformance": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Container Runtime blackbox test when running a container with a new image should be able to pull image [NodeConformance] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "NodeConformance": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Container Runtime blackbox test when running a container with a new image should not be able to pull from private registry without secret [NodeConformance] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "NodeConformance": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Container Runtime blackbox test when running a container with a new image should be able to pull from private registry with secret [NodeConformance] [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "NodeConformance": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] RuntimeClass should reject a Pod requesting a non-existent RuntimeClass [NodeConformance] [Conformance] [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "NodeConformance": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] RuntimeClass should reject a Pod requesting a RuntimeClass with an unconfigured handler [NodeFeature:RuntimeHandler] [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + 
"NodeFeature:RuntimeHandler": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] RuntimeClass should run a Pod requesting a RuntimeClass with a configured handler [NodeFeature:RuntimeHandler] [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "NodeFeature:RuntimeHandler": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] RuntimeClass should schedule a Pod requesting a RuntimeClass without PodOverhead [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "NodeConformance": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] RuntimeClass should schedule a Pod requesting a RuntimeClass and initialize its Overhead [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "NodeConformance": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] RuntimeClass should reject a Pod requesting a deleted RuntimeClass [NodeConformance] [Conformance] [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "NodeConformance": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] RuntimeClass should support RuntimeClasses API operations [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Secrets should be consumable from pods in env vars [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "NodeConformance": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Secrets should be consumable via the environment [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "NodeConformance": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + 
"mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Secrets should fail to create secret due to empty secret key [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Secrets should patch a secret [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Secrets should be consumable as environment variable names when secret keys start with a digit [Feature:RelaxedEnvironmentVariableValidation] [Disabled:Alpha] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:RelaxedEnvironmentVariableValidation": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Security Context When creating a pod with HostUsers must create the user namespace if set to false [LinuxOnly] [Feature:UserNamespacesSupport] [Disabled:Alpha] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:UserNamespacesSupport": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Security Context When creating a pod with HostUsers must not create the user namespace if set to true [LinuxOnly] [Feature:UserNamespacesSupport] [Disabled:Alpha] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:UserNamespacesSupport": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Security Context When creating a pod with HostUsers should mount all volumes with proper permissions with hostUsers=false [LinuxOnly] [Feature:UserNamespacesSupport] [Disabled:Alpha] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:UserNamespacesSupport": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Security Context When creating a pod with HostUsers should set FSGroup to user inside the container with hostUsers=false [LinuxOnly] [Feature:UserNamespacesSupport] [Disabled:Alpha] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:UserNamespacesSupport": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + 
"memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Security Context When creating a container with runAsUser should run the container with uid 65534 [LinuxOnly] [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "NodeConformance": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Security Context When creating a container with runAsUser should run the container with uid 0 [LinuxOnly] [NodeConformance] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "NodeConformance": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Security Context When creating a container with runAsNonRoot should run with an explicit non-root user ID [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Security Context When creating a container with runAsNonRoot should not run with an explicit root user ID [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Security Context When creating a container with runAsNonRoot should run with an image specified user ID [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Security Context When creating a container with runAsNonRoot should not run without a specified user ID [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Security Context When creating a pod with readOnlyRootFilesystem should run the container with readonly rootfs when readOnlyRootFilesystem=true [LinuxOnly] [NodeConformance] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "NodeConformance": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": 
"openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Security Context When creating a pod with readOnlyRootFilesystem should run the container with writable rootfs when readOnlyRootFilesystem=false [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "NodeConformance": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Security Context When creating a pod with privileged should run the container as unprivileged when false [LinuxOnly] [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "NodeConformance": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Security Context When creating a pod with privileged should run the container as privileged when true [LinuxOnly] [NodeFeature:HostAccess] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "NodeFeature:HostAccess": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Security Context when creating containers with AllowPrivilegeEscalation should allow privilege escalation when not explicitly set and uid != 0 [LinuxOnly] [NodeConformance] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "NodeConformance": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Security Context when creating containers with AllowPrivilegeEscalation should not allow privilege escalation when false [LinuxOnly] [NodeConformance] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "NodeConformance": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Security Context when creating containers with AllowPrivilegeEscalation should allow privilege escalation when true [LinuxOnly] [NodeConformance] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "NodeConformance": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] User Namespaces for Pod Security Standards [LinuxOnly] with UserNamespacesSupport and UserNamespacesPodSecurityStandards enabled should allow pod 
[Feature:UserNamespacesPodSecurityStandards] [Disabled:Alpha] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:UserNamespacesPodSecurityStandards": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Sysctls [LinuxOnly] [NodeConformance] should support sysctls [MinimumKubeletVersion:1.21] [Environment:NotInUserNS] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "Environment:NotInUserNS": {}, + "NodeConformance": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Sysctls [LinuxOnly] [NodeConformance] should reject invalid sysctls [MinimumKubeletVersion:1.21] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "NodeConformance": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Sysctls [LinuxOnly] [NodeConformance] should not launch unsafe, but not explicitly enabled sysctls on the node [MinimumKubeletVersion:1.21] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "NodeConformance": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Sysctls [LinuxOnly] [NodeConformance] should support sysctls with slashes as separator [MinimumKubeletVersion:1.23] [Environment:NotInUserNS] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Environment:NotInUserNS": {}, + "NodeConformance": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-scheduling] LimitRange should create a LimitRange with defaults and ensure pod has those defaults applied. 
[Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "sig-scheduling": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-scheduling] LimitRange should list, patch and delete a LimitRange by collection [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "sig-scheduling": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-scheduling] SchedulerPredicates [Serial] validates local ephemeral storage resource limits of pods that are allowed to run [Feature:LocalStorageCapacityIsolation] [Disabled:SpecialConfig] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:LocalStorageCapacityIsolation": {}, + "Serial": {}, + "sig-scheduling": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-scheduling] SchedulerPredicates [Serial] validates pod overhead is considered along with resource limits of pods that are allowed to run verify pod overhead is accounted for [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-scheduling": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-scheduling] SchedulerPredicates [Serial] validates resource limits of pods that are allowed to run [Conformance] [Suite:openshift/conformance/serial/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "Serial": {}, + "sig-scheduling": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-scheduling] SchedulerPredicates [Serial] validates that NodeSelector is respected if not matching [Conformance] [Suite:openshift/conformance/serial/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "Serial": {}, + "sig-scheduling": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-scheduling] SchedulerPredicates [Serial] validates that NodeSelector is respected if matching [Conformance] [Suite:openshift/conformance/serial/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "Serial": {}, + "sig-scheduling": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-scheduling] SchedulerPredicates [Serial] 
validates that NodeAffinity is respected if not matching [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-scheduling": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-scheduling] SchedulerPredicates [Serial] validates that required NodeAffinity setting is respected if matching [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-scheduling": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-scheduling] SchedulerPredicates [Serial] validates that taints-tolerations is respected if matching [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-scheduling": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-scheduling] SchedulerPredicates [Serial] validates that taints-tolerations is respected if not matching [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-scheduling": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-scheduling] SchedulerPredicates [Serial] validates that there is no conflict between pods with same hostPort but different hostIP and protocol [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-scheduling": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-scheduling] SchedulerPredicates [Serial] validates that there exists conflict between pods with same hostPort and protocol but one using 0.0.0.0 hostIP [Conformance] [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "Serial": {}, + "sig-scheduling": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-scheduling] SchedulerPredicates [Serial] PodTopologySpread Filtering validates 4 pods with MaxSkew=1 are evenly distributed into 2 nodes [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-scheduling": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-scheduling] SchedulerPredicates [Serial] validates Pods with non-empty schedulingGates are blocked on scheduling [Suite:openshift/conformance/serial] [Suite:k8s]", + 
"otherNames": null, + "labels": { + "Serial": {}, + "sig-scheduling": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-scheduling] SchedulerPredicates [Serial] when PVC has node-affinity to non-existent/illegal nodes, the pod should be scheduled normally if suitable nodes exist [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-scheduling": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-scheduling] SchedulerPreemption [Serial] validates basic preemption works [Conformance] [Suite:openshift/conformance/serial/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "Serial": {}, + "sig-scheduling": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-scheduling] SchedulerPreemption [Serial] validates lower priority pod preemption by critical pod [Conformance] [Suite:openshift/conformance/serial/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "Serial": {}, + "sig-scheduling": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-scheduling] SchedulerPreemption [Serial] validates pod disruption condition is added to the preempted pod [Conformance] [Suite:openshift/conformance/serial/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "Serial": {}, + "sig-scheduling": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-scheduling] SchedulerPreemption [Serial] PodTopologySpread Preemption validates proper pods are preempted [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-scheduling": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-scheduling] SchedulerPreemption [Serial] PreemptionExecutionPath runs ReplicaSets to verify preemption running path [Conformance] [Suite:openshift/conformance/serial/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "Serial": {}, + "sig-scheduling": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-scheduling] SchedulerPreemption [Serial] PriorityClass endpoints verify PriorityClass endpoints can be operated with different HTTP methods [Conformance] [Suite:openshift/conformance/serial/minimal] 
[Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "Serial": {}, + "sig-scheduling": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-scheduling] SchedulerPriorities [Serial] Pod should be scheduled to node that don't match the PodAntiAffinity terms [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-scheduling": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-scheduling] SchedulerPriorities [Serial] Pod should be preferably scheduled to nodes pod can tolerate [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-scheduling": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-scheduling] SchedulerPriorities [Serial] PodTopologySpread Scoring validates pod should be preferably scheduled to node which makes the matching pods more evenly distributed [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-scheduling": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-scheduling] Multi-AZ Clusters should spread the pods of a service across zones [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-scheduling": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-scheduling] Multi-AZ Clusters should spread the pods of a replication controller across zones [Serial] [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-scheduling": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] CronJob should schedule multiple jobs concurrently [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "sig-apps": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] CronJob should not schedule jobs when suspended [Slow] [Conformance] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "Slow": {}, + "sig-apps": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": 
"openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] CronJob should not schedule new jobs when ForbidConcurrent [Slow] [Conformance] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "Slow": {}, + "sig-apps": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] CronJob should replace jobs when ReplaceConcurrent [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "sig-apps": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] CronJob should be able to schedule after more than 100 missed schedule [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-apps": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] CronJob should not emit unexpected warnings [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-apps": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] CronJob should remove from active list jobs that have been deleted [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-apps": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] CronJob should delete successful finished jobs with limit of one successful job [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-apps": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] CronJob should delete failed finished jobs with limit of one job [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-apps": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] CronJob should support timezone [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-apps": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] CronJob should support CronJob API operations [Conformance] 
[Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "sig-apps": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] DaemonRestart [Disruptive] Controller Manager should not create/delete replicas across restart [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "sig-apps": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] DaemonRestart [Disruptive] Scheduler should continue assigning pods to nodes across restart [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "sig-apps": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] DaemonRestart [Disruptive] Kubelet should not restart containers across restart [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "sig-apps": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] DaemonRestart [Disruptive] Kube-proxy should recover after being killed accidentally [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "sig-apps": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] Job should run a job to completion when tasks succeed [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-apps": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] Job should allow to use the pod failure policy on exit code to fail the job early [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "sig-apps": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] Job should allow to use the pod failure policy to not count the failure towards the backoffLimit [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-apps": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] Job should allow to use a pod failure policy to ignore failure for an evicted pod; matching on the exit 
code [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-apps": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] Job should allow to use a pod failure policy to ignore failure for an evicted pod; matching on the DisruptionTarget condition [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-apps": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] Job should not create pods when created in suspend state [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-apps": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] Job should delete pods when suspended [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-apps": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] Job should recreate pods only after they have failed if pod replacement policy is set to Failed [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-apps": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] Job should create pods for an Indexed job with completion indexes and specified hostname [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "sig-apps": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] Job with successPolicy should succeeded when all indexes succeeded [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-apps": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] Job with successPolicy succeededIndexes rule should succeeded even when some indexes remain pending [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-apps": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] Job with successPolicy succeededCount rule should succeeded even when some indexes remain pending 
[Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-apps": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] Job should execute all indexes despite some failing when using backoffLimitPerIndex [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-apps": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] Job should terminate job execution when the number of failed indexes exceeds maxFailedIndexes [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-apps": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] Job should mark indexes as failed when the FailIndex action is matched in podFailurePolicy [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-apps": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] Job should remove pods when job is deleted [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-apps": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] Job should run a job to completion when tasks sometimes fail and are locally restarted [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "sig-apps": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] Job should run a job to completion when tasks sometimes fail and are not locally restarted [Flaky] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-apps": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] Job should fail when exceeds active deadline [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-apps": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] Job should delete a job [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "sig-apps": {} + 
}, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] Job should adopt matching orphans and release non-matching pods [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "sig-apps": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] Job should fail to exceed backoffLimit [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-apps": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] Job should run a job to completion with CPU requests [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-apps": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] Job should apply changes to a job status [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "sig-apps": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] Job should manage the lifecycle of a job [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "sig-apps": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] Job should update the status ready field [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-apps": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] TTLAfterFinished job should be deleted once it finishes after TTL seconds [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-apps": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] ControllerRevision [Serial] should manage the lifecycle of a ControllerRevision [Conformance] [Suite:openshift/conformance/serial/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "Serial": {}, + "sig-apps": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": 
null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] DisruptionController Listing PodDisruptionBudgets for all namespaces should list and delete a collection of PodDisruptionBudgets [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "sig-apps": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] DisruptionController should create a PodDisruptionBudget [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "sig-apps": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] DisruptionController should observe PodDisruptionBudget status updated [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "sig-apps": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] DisruptionController should update/patch PodDisruptionBudget status [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "sig-apps": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] DisruptionController should observe that the PodDisruptionBudget status is not updated for unmanaged pods [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-apps": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] DisruptionController evictions: no PDB =\u003e should allow an eviction [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-apps": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] DisruptionController evictions: too few pods, absolute =\u003e should not allow an eviction [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-apps": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] DisruptionController evictions: enough pods, absolute =\u003e should allow an eviction [Suite:openshift/conformance/parallel] [Suite:k8s]", + 
"otherNames": null, + "labels": { + "sig-apps": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] DisruptionController evictions: enough pods, replicaSet, percentage =\u003e should allow an eviction [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-apps": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] DisruptionController evictions: too few pods, replicaSet, percentage =\u003e should not allow an eviction [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-apps": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] DisruptionController evictions: maxUnavailable allow single eviction, percentage =\u003e should allow an eviction [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-apps": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] DisruptionController evictions: maxUnavailable deny evictions, integer =\u003e should not allow an eviction [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-apps": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] DisruptionController should block an eviction until the PDB is updated to allow it [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "sig-apps": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] DisruptionController should evict ready pods with Default UnhealthyPodEvictionPolicy [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-apps": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] DisruptionController should evict ready pods with IfHealthyBudget UnhealthyPodEvictionPolicy [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-apps": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": 
"[sig-apps] DisruptionController should evict ready pods with AlwaysAllow UnhealthyPodEvictionPolicy [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-apps": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] DisruptionController should not evict unready pods with Default UnhealthyPodEvictionPolicy [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-apps": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] DisruptionController should not evict unready pods with IfHealthyBudget UnhealthyPodEvictionPolicy [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-apps": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] DisruptionController should evict unready pods with AlwaysAllow UnhealthyPodEvictionPolicy [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-apps": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] ReplicationController should serve a basic image on each replica with a public image [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "sig-apps": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] ReplicationController should serve a basic image on each replica with a private image [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-apps": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] ReplicationController should surface a failure condition on a common issue like exceeded quota [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "sig-apps": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] ReplicationController should adopt matching pods on creation [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "sig-apps": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + 
"timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] ReplicationController should release no longer matching pods [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "sig-apps": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] ReplicationController should test the lifecycle of a ReplicationController [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "sig-apps": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] ReplicationController should get and update a ReplicationController scale [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "sig-apps": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] ReplicaSet should serve a basic image on each replica with a public image [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "sig-apps": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] ReplicaSet should serve a basic image on each replica with a private image [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-apps": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] ReplicaSet should surface a failure condition on a common issue like exceeded quota [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-apps": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] ReplicaSet should adopt matching pods on creation and release no longer matching pods [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "sig-apps": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] ReplicaSet Replicaset should have a working scale subresource [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "sig-apps": {} + 
}, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] ReplicaSet Replace and Patch tests [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "sig-apps": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] ReplicaSet should list and delete a collection of ReplicaSets [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "sig-apps": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] ReplicaSet should validate Replicaset Status endpoints [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "sig-apps": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] StatefulSet Basic StatefulSet functionality [StatefulSetBasic] should provide basic identity [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-apps": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] StatefulSet Basic StatefulSet functionality [StatefulSetBasic] should adopt matching orphans and release non-matching pods [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-apps": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] StatefulSet Basic StatefulSet functionality [StatefulSetBasic] should not deadlock when a pod's predecessor fails [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-apps": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] StatefulSet Basic StatefulSet functionality [StatefulSetBasic] should perform rolling updates and roll backs of template modifications with PVCs [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-apps": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] StatefulSet Basic StatefulSet functionality [StatefulSetBasic] should 
perform rolling updates and roll backs of template modifications [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "sig-apps": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] StatefulSet Basic StatefulSet functionality [StatefulSetBasic] should perform canary updates and phased rolling updates of template modifications [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "sig-apps": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] StatefulSet Basic StatefulSet functionality [StatefulSetBasic] should perform canary updates and phased rolling updates of template modifications for partiton1 and delete pod-0 without failing container [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-apps": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] StatefulSet Basic StatefulSet functionality [StatefulSetBasic] should perform canary updates and phased rolling updates of template modifications for partiton1 and delete pod-0 with failing container [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-apps": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] StatefulSet Basic StatefulSet functionality [StatefulSetBasic] should implement legacy replacement when the update strategy is OnDelete [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-apps": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] StatefulSet Basic StatefulSet functionality [StatefulSetBasic] Scaling should happen in predictable order and halt if any stateful pod is unhealthy [Slow] [Conformance] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "Slow": {}, + "sig-apps": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] StatefulSet Basic StatefulSet functionality [StatefulSetBasic] Burst scaling should run to completion even with unhealthy pods [Slow] [Conformance] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "Slow": {}, + "sig-apps": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": 
"openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] StatefulSet Basic StatefulSet functionality [StatefulSetBasic] Should recreate evicted statefulset [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "sig-apps": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] StatefulSet Basic StatefulSet functionality [StatefulSetBasic] should have a working scale subresource [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "sig-apps": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] StatefulSet Basic StatefulSet functionality [StatefulSetBasic] should list, patch and delete a collection of StatefulSets [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "sig-apps": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] StatefulSet Basic StatefulSet functionality [StatefulSetBasic] should validate Statefulset Status endpoints [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "sig-apps": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] StatefulSet Deploy clustered applications [Feature:StatefulSet] [Slow] should creating a working zookeeper cluster [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:StatefulSet": {}, + "Slow": {}, + "sig-apps": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] StatefulSet Deploy clustered applications [Feature:StatefulSet] [Slow] should creating a working redis cluster [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:StatefulSet": {}, + "Slow": {}, + "sig-apps": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] StatefulSet Deploy clustered applications [Feature:StatefulSet] [Slow] should creating a working mysql cluster [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:StatefulSet": {}, + "Slow": {}, + "sig-apps": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] StatefulSet Deploy clustered 
applications [Feature:StatefulSet] [Slow] should creating a working CockroachDB cluster [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:StatefulSet": {}, + "Slow": {}, + "sig-apps": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] StatefulSet MinReadySeconds should be honored when enabled [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-apps": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] StatefulSet AvailableReplicas should get updated accordingly when MinReadySeconds is enabled [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-apps": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] StatefulSet Non-retain StatefulSetPersistentVolumeClaimPolicy should delete PVCs with a WhenDeleted policy [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-apps": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] StatefulSet Non-retain StatefulSetPersistentVolumeClaimPolicy should delete PVCs with a OnScaledown policy [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-apps": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] StatefulSet Non-retain StatefulSetPersistentVolumeClaimPolicy should not delete PVC with OnScaledown policy if another controller owns the PVC [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-apps": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] StatefulSet Non-retain StatefulSetPersistentVolumeClaimPolicy should delete PVCs after adopting pod (WhenDeleted) [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-apps": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] StatefulSet Non-retain StatefulSetPersistentVolumeClaimPolicy should delete PVCs after adopting pod (WhenScaled) [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-apps": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" 
+ }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] StatefulSet Non-retain StatefulSetPersistentVolumeClaimPolicy should not delete PVCs when there is another controller [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-apps": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] StatefulSet Automatically recreate PVC for pending pod when PVC is missing PVC should be recreated when pod is pending due to missing PVC [Disruptive] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Serial": {}, + "sig-apps": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] StatefulSet Scaling StatefulSetStartOrdinal Setting .start.ordinal [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-apps": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] StatefulSet Scaling StatefulSetStartOrdinal Increasing .start.ordinal [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-apps": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] StatefulSet Scaling StatefulSetStartOrdinal Decreasing .start.ordinal [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-apps": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] StatefulSet Scaling StatefulSetStartOrdinal Removing .start.ordinal [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-apps": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] Daemon set [Serial] should run and stop simple daemon [Conformance] [Suite:openshift/conformance/serial/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "Serial": {}, + "sig-apps": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] Daemon set [Serial] should run and stop complex daemon [Conformance] [Suite:openshift/conformance/serial/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "Serial": {}, + "sig-apps": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, 
+ "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] Daemon set [Serial] should run and stop complex daemon with node affinity [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-apps": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] Daemon set [Serial] should retry creating failed daemon pods [Conformance] [Suite:openshift/conformance/serial/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "Serial": {}, + "sig-apps": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] Daemon set [Serial] should not update pod when spec was updated and update strategy is OnDelete [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-apps": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] Daemon set [Serial] should update pod when spec was updated and update strategy is RollingUpdate [Conformance] [Suite:openshift/conformance/serial/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "Serial": {}, + "sig-apps": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] Daemon set [Serial] should rollback without unnecessary restarts [Conformance] [Skipped:SingleReplicaTopology] [Suite:openshift/conformance/serial/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "Serial": {}, + "sig-apps": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] Daemon set [Serial] should surge pods onto nodes when spec was updated and update strategy is RollingUpdate [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-apps": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] Daemon set [Serial] should list and delete a collection of DaemonSets [Conformance] [Suite:openshift/conformance/serial/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "Serial": {}, + "sig-apps": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] Daemon set [Serial] should verify changes to a 
daemon set status [Conformance] [Suite:openshift/conformance/serial/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "Serial": {}, + "sig-apps": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] Deployment deployment reaping should cascade to its replica sets and pods [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-apps": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] Deployment RollingUpdateDeployment should delete old pods and create new ones [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "sig-apps": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] Deployment RecreateDeployment should delete old pods and create new ones [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "sig-apps": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] Deployment deployment should delete old replica sets [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "sig-apps": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] Deployment deployment should support rollover [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "sig-apps": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] Deployment iterative rollouts should eventually progress [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-apps": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] Deployment test Deployment ReplicaSet orphaning and adoption regarding controllerRef [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-apps": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] 
Deployment Deployment should have a working scale subresource [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "sig-apps": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] Deployment deployment should support proportional scaling [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "sig-apps": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] Deployment should not disrupt a cloud load-balancer's connectivity during rollout [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-apps": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] Deployment should run the lifecycle of a Deployment [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "sig-apps": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] Deployment should validate Deployment Status endpoints [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "sig-apps": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-autoscaling] [Feature:ClusterSizeAutoscalingScaleUp] [Slow] Autoscaling Autoscaling a service from 1 pod and 3 nodes to 8 pods and \u003e=4 nodes takes less than 15 minutes [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:ClusterSizeAutoscalingScaleUp": {}, + "Slow": {}, + "sig-autoscaling": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-autoscaling] Cluster size autoscaler scalability [Slow] should scale up at all [Feature:ClusterAutoscalerScalability1] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:ClusterAutoscalerScalability1": {}, + "Slow": {}, + "sig-autoscaling": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-autoscaling] Cluster size autoscaler scalability [Slow] should scale up twice [Feature:ClusterAutoscalerScalability2] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:ClusterAutoscalerScalability2": {}, + "Slow": {}, + "sig-autoscaling": {} + }, + 
"tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-autoscaling] Cluster size autoscaler scalability [Slow] should scale down empty nodes [Feature:ClusterAutoscalerScalability3] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:ClusterAutoscalerScalability3": {}, + "Slow": {}, + "sig-autoscaling": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-autoscaling] Cluster size autoscaler scalability [Slow] should scale down underutilized nodes [Feature:ClusterAutoscalerScalability4] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:ClusterAutoscalerScalability4": {}, + "Slow": {}, + "sig-autoscaling": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-autoscaling] Cluster size autoscaler scalability [Slow] shouldn't scale down with underutilized nodes due to host port conflicts [Feature:ClusterAutoscalerScalability5] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:ClusterAutoscalerScalability5": {}, + "Slow": {}, + "sig-autoscaling": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-autoscaling] Cluster size autoscaler scalability [Slow] CA ignores unschedulable pods while scheduling schedulable pods [Feature:ClusterAutoscalerScalability6] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:ClusterAutoscalerScalability6": {}, + "Slow": {}, + "sig-autoscaling": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-autoscaling] Cluster size autoscaling [Slow] shouldn't increase cluster size if pending pod is too large [Feature:ClusterSizeAutoscalingScaleUp] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:ClusterSizeAutoscalingScaleUp": {}, + "Slow": {}, + "sig-autoscaling": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-autoscaling] Cluster size autoscaling [Slow] should increase cluster size if pending pods are small [Feature:ClusterSizeAutoscalingScaleUp] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:ClusterSizeAutoscalingScaleUp": {}, + "Slow": {}, + "sig-autoscaling": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-autoscaling] Cluster size autoscaling [Slow] Should scale up GPU pool from 0 [GpuType:] [Feature:ClusterSizeAutoscalingGpu] [Suite:k8s]", + "otherNames": null, + "labels": { + 
"Feature:ClusterSizeAutoscalingGpu": {}, + "Slow": {}, + "sig-autoscaling": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-autoscaling] Cluster size autoscaling [Slow] Should scale up GPU pool from 1 [GpuType:] [Feature:ClusterSizeAutoscalingGpu] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:ClusterSizeAutoscalingGpu": {}, + "Slow": {}, + "sig-autoscaling": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-autoscaling] Cluster size autoscaling [Slow] Should not scale GPU pool up if pod does not require GPUs [GpuType:] [Feature:ClusterSizeAutoscalingGpu] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:ClusterSizeAutoscalingGpu": {}, + "Slow": {}, + "sig-autoscaling": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-autoscaling] Cluster size autoscaling [Slow] Should scale down GPU pool from 1 [GpuType:] [Feature:ClusterSizeAutoscalingGpu] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:ClusterSizeAutoscalingGpu": {}, + "Slow": {}, + "sig-autoscaling": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-autoscaling] Cluster size autoscaling [Slow] should increase cluster size if pending pods are small and one node is broken [Feature:ClusterSizeAutoscalingScaleUp] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:ClusterSizeAutoscalingScaleUp": {}, + "Slow": {}, + "sig-autoscaling": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-autoscaling] Cluster size autoscaling [Slow] shouldn't trigger additional scale-ups during processing scale-up [Feature:ClusterSizeAutoscalingScaleUp] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:ClusterSizeAutoscalingScaleUp": {}, + "Slow": {}, + "sig-autoscaling": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-autoscaling] Cluster size autoscaling [Slow] should increase cluster size if pending pods are small and there is another node pool that is not autoscaled [Feature:ClusterSizeAutoscalingScaleUp] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:ClusterSizeAutoscalingScaleUp": {}, + "Slow": {}, + "sig-autoscaling": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-autoscaling] Cluster size autoscaling [Slow] should disable node pool 
autoscaling [Feature:ClusterSizeAutoscalingScaleUp] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:ClusterSizeAutoscalingScaleUp": {}, + "Slow": {}, + "sig-autoscaling": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-autoscaling] Cluster size autoscaling [Slow] should increase cluster size if pods are pending due to host port conflict [Feature:ClusterSizeAutoscalingScaleUp] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:ClusterSizeAutoscalingScaleUp": {}, + "Slow": {}, + "sig-autoscaling": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-autoscaling] Cluster size autoscaling [Slow] should increase cluster size if pods are pending due to pod anti-affinity [Feature:ClusterSizeAutoscalingScaleUp] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:ClusterSizeAutoscalingScaleUp": {}, + "Slow": {}, + "sig-autoscaling": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-autoscaling] Cluster size autoscaling [Slow] should increase cluster size if pod requesting EmptyDir volume is pending [Feature:ClusterSizeAutoscalingScaleUp] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:ClusterSizeAutoscalingScaleUp": {}, + "Slow": {}, + "sig-autoscaling": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-autoscaling] Cluster size autoscaling [Slow] should increase cluster size if pod requesting volume is pending [Feature:ClusterSizeAutoscalingScaleUp] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:ClusterSizeAutoscalingScaleUp": {}, + "Slow": {}, + "sig-autoscaling": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-autoscaling] Cluster size autoscaling [Slow] should add node to the particular mig [Feature:ClusterSizeAutoscalingScaleUp] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:ClusterSizeAutoscalingScaleUp": {}, + "Slow": {}, + "sig-autoscaling": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-autoscaling] Cluster size autoscaling [Slow] should scale up correct target pool [Feature:ClusterSizeAutoscalingScaleUp] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:ClusterSizeAutoscalingScaleUp": {}, + "Slow": {}, + "sig-autoscaling": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + 
"name": "[sig-autoscaling] Cluster size autoscaling [Slow] should correctly scale down after a node is not needed [Feature:ClusterSizeAutoscalingScaleDown] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:ClusterSizeAutoscalingScaleDown": {}, + "Slow": {}, + "sig-autoscaling": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-autoscaling] Cluster size autoscaling [Slow] should correctly scale down after a node is not needed and one node is broken [Feature:ClusterSizeAutoscalingScaleDown] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:ClusterSizeAutoscalingScaleDown": {}, + "Slow": {}, + "sig-autoscaling": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-autoscaling] Cluster size autoscaling [Slow] should correctly scale down after a node is not needed when there is non autoscaled pool [Feature:ClusterSizeAutoscalingScaleDown] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:ClusterSizeAutoscalingScaleDown": {}, + "Slow": {}, + "sig-autoscaling": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-autoscaling] Cluster size autoscaling [Slow] should be able to scale down when rescheduling a pod is required and pdb allows for it [Feature:ClusterSizeAutoscalingScaleDown] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:ClusterSizeAutoscalingScaleDown": {}, + "Slow": {}, + "sig-autoscaling": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-autoscaling] Cluster size autoscaling [Slow] shouldn't be able to scale down when rescheduling a pod is required, but pdb doesn't allow drain [Feature:ClusterSizeAutoscalingScaleDown] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:ClusterSizeAutoscalingScaleDown": {}, + "Slow": {}, + "sig-autoscaling": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-autoscaling] Cluster size autoscaling [Slow] should be able to scale down by draining multiple pods one by one as dictated by pdb [Feature:ClusterSizeAutoscalingScaleDown] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:ClusterSizeAutoscalingScaleDown": {}, + "Slow": {}, + "sig-autoscaling": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-autoscaling] Cluster size autoscaling [Slow] should be able to scale down by draining system pods with pdb [Feature:ClusterSizeAutoscalingScaleDown] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:ClusterSizeAutoscalingScaleDown": {}, + 
"Slow": {}, + "sig-autoscaling": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-autoscaling] Cluster size autoscaling [Slow] Should be able to scale a node group up from 0 [Feature:ClusterSizeAutoscalingScaleUp] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:ClusterSizeAutoscalingScaleUp": {}, + "Slow": {}, + "sig-autoscaling": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-autoscaling] Cluster size autoscaling [Slow] Should be able to scale a node group down to 0 [Feature:ClusterSizeAutoscalingScaleDown] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:ClusterSizeAutoscalingScaleDown": {}, + "Slow": {}, + "sig-autoscaling": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-autoscaling] Cluster size autoscaling [Slow] Shouldn't perform scale up operation and should list unhealthy status if most of the cluster is broken [Feature:ClusterSizeAutoscalingScaleUp] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:ClusterSizeAutoscalingScaleUp": {}, + "Slow": {}, + "sig-autoscaling": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-autoscaling] Cluster size autoscaling [Slow] shouldn't scale up when expendable pod is created [Feature:ClusterSizeAutoscalingScaleUp] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:ClusterSizeAutoscalingScaleUp": {}, + "Slow": {}, + "sig-autoscaling": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-autoscaling] Cluster size autoscaling [Slow] should scale up when non expendable pod is created [Feature:ClusterSizeAutoscalingScaleUp] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:ClusterSizeAutoscalingScaleUp": {}, + "Slow": {}, + "sig-autoscaling": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-autoscaling] Cluster size autoscaling [Slow] shouldn't scale up when expendable pod is preempted [Feature:ClusterSizeAutoscalingScaleUp] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:ClusterSizeAutoscalingScaleUp": {}, + "Slow": {}, + "sig-autoscaling": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-autoscaling] Cluster size autoscaling [Slow] should scale down when expendable pod is running [Feature:ClusterSizeAutoscalingScaleDown] [Suite:k8s]", 
+ "otherNames": null, + "labels": { + "Feature:ClusterSizeAutoscalingScaleDown": {}, + "Slow": {}, + "sig-autoscaling": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-autoscaling] Cluster size autoscaling [Slow] shouldn't scale down when non expendable pod is running [Feature:ClusterSizeAutoscalingScaleDown] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:ClusterSizeAutoscalingScaleDown": {}, + "Slow": {}, + "sig-autoscaling": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-autoscaling] Cluster size autoscaling [Slow] should scale up when unprocessed pod is created and is going to be unschedulable [Feature:ClusterScaleUpBypassScheduler] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:ClusterScaleUpBypassScheduler": {}, + "Slow": {}, + "sig-autoscaling": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-autoscaling] Cluster size autoscaling [Slow] shouldn't scale up when unprocessed pod is created and is going to be schedulable [Feature:ClusterScaleUpBypassScheduler] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:ClusterScaleUpBypassScheduler": {}, + "Slow": {}, + "sig-autoscaling": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-autoscaling] Cluster size autoscaling [Slow] shouldn't scale up when unprocessed pod is created and scheduler is not specified to be bypassed [Feature:ClusterScaleUpBypassScheduler] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:ClusterScaleUpBypassScheduler": {}, + "Slow": {}, + "sig-autoscaling": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-autoscaling] DNS horizontal autoscaling [Serial] [Slow] [KubeUp] [sig-cloud-provider-gcp] kube-dns-autoscaler should scale kube-dns pods when cluster size changed [Disabled:SpecialConfig] [Suite:k8s]", + "otherNames": null, + "labels": { + "KubeUp": {}, + "Serial": {}, + "Slow": {}, + "sig-autoscaling": {}, + "sig-cloud-provider-gcp": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-autoscaling] DNS horizontal autoscaling kube-dns-autoscaler should scale kube-dns pods in both nonfaulty and faulty scenarios [Disabled:SpecialConfig] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-autoscaling": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": 
"blocking" + }, + { + "name": "[sig-autoscaling] [Feature:HPA] Horizontal pod autoscaling (scale resource: CPU) [Serial] [Slow] Deployment (Pod Resource) Should scale from 1 pod to 3 pods and then from 3 pods to 5 pods using Average Utilization for aggregation [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:HPA": {}, + "Serial": {}, + "Slow": {}, + "sig-autoscaling": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-autoscaling] [Feature:HPA] Horizontal pod autoscaling (scale resource: CPU) [Serial] [Slow] Deployment (Pod Resource) Should scale from 5 pods to 3 pods and then from 3 pods to 1 pod using Average Utilization for aggregation [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:HPA": {}, + "Serial": {}, + "Slow": {}, + "sig-autoscaling": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-autoscaling] [Feature:HPA] Horizontal pod autoscaling (scale resource: CPU) [Serial] [Slow] Deployment (Pod Resource) Should scale from 1 pod to 3 pods and then from 3 pods to 5 pods using Average Value for aggregation [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:HPA": {}, + "Serial": {}, + "Slow": {}, + "sig-autoscaling": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-autoscaling] [Feature:HPA] Horizontal pod autoscaling (scale resource: CPU) [Serial] [Slow] Deployment (Container Resource) Should scale from 1 pod to 3 pods and then from 3 pods to 5 pods using Average Utilization for aggregation [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:HPA": {}, + "Serial": {}, + "Slow": {}, + "sig-autoscaling": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-autoscaling] [Feature:HPA] Horizontal pod autoscaling (scale resource: CPU) [Serial] [Slow] Deployment (Container Resource) Should scale from 1 pod to 3 pods and then from 3 pods to 5 pods using Average Value for aggregation [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:HPA": {}, + "Serial": {}, + "Slow": {}, + "sig-autoscaling": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-autoscaling] [Feature:HPA] Horizontal pod autoscaling (scale resource: CPU) [Serial] [Slow] ReplicaSet Should scale from 1 pod to 3 pods and then from 3 pods to 5 pods [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:HPA": {}, + "Serial": {}, + "Slow": {}, + "sig-autoscaling": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-autoscaling] 
[Feature:HPA] Horizontal pod autoscaling (scale resource: CPU) [Serial] [Slow] ReplicaSet Should scale from 5 pods to 3 pods and then from 3 pods to 1 pod [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:HPA": {}, + "Serial": {}, + "Slow": {}, + "sig-autoscaling": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-autoscaling] [Feature:HPA] Horizontal pod autoscaling (scale resource: CPU) [Serial] [Slow] ReplicationController Should scale from 1 pod to 3 pods and then from 3 pods to 5 pods and verify decision stability [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:HPA": {}, + "Serial": {}, + "Slow": {}, + "sig-autoscaling": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-autoscaling] [Feature:HPA] Horizontal pod autoscaling (scale resource: CPU) [Serial] [Slow] ReplicationController Should scale from 5 pods to 3 pods and then from 3 pods to 1 pod and verify decision stability [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:HPA": {}, + "Serial": {}, + "Slow": {}, + "sig-autoscaling": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-autoscaling] [Feature:HPA] Horizontal pod autoscaling (scale resource: CPU) ReplicationController light Should scale from 1 pod to 2 pods [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:HPA": {}, + "sig-autoscaling": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-autoscaling] [Feature:HPA] Horizontal pod autoscaling (scale resource: CPU) ReplicationController light [Slow] Should scale from 2 pods to 1 pod [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:HPA": {}, + "Slow": {}, + "sig-autoscaling": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-autoscaling] [Feature:HPA] Horizontal pod autoscaling (scale resource: CPU) [Serial] [Slow] ReplicaSet with idle sidecar (ContainerResource use case) Should scale from 1 pod to 3 pods and then from 3 pods to 5 pods on a busy application with an idle sidecar container [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:HPA": {}, + "Serial": {}, + "Slow": {}, + "sig-autoscaling": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-autoscaling] [Feature:HPA] Horizontal pod autoscaling (scale resource: CPU) [Serial] [Slow] ReplicaSet with idle sidecar (ContainerResource use case) Should not scale up on a busy sidecar with an idle application [Suite:k8s]", + 
"otherNames": null, + "labels": { + "Feature:HPA": {}, + "Serial": {}, + "Slow": {}, + "sig-autoscaling": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-autoscaling] [Feature:HPA] Horizontal pod autoscaling (scale resource: CPU) CustomResourceDefinition Should scale with a CRD targetRef [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:HPA": {}, + "sig-autoscaling": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-autoscaling] [Feature:HPA] Horizontal pod autoscaling (scale resource: Memory) [Serial] [Slow] Deployment (Pod Resource) Should scale from 1 pod to 3 pods and then from 3 pods to 5 pods using Average Utilization for aggregation [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:HPA": {}, + "Serial": {}, + "Slow": {}, + "sig-autoscaling": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-autoscaling] [Feature:HPA] Horizontal pod autoscaling (scale resource: Memory) [Serial] [Slow] Deployment (Pod Resource) Should scale from 1 pod to 3 pods and then from 3 pods to 5 pods using Average Value for aggregation [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:HPA": {}, + "Serial": {}, + "Slow": {}, + "sig-autoscaling": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-autoscaling] [Feature:HPA] Horizontal pod autoscaling (scale resource: Memory) [Serial] [Slow] Deployment (Container Resource) Should scale from 1 pod to 3 pods and then from 3 pods to 5 pods using Average Utilization for aggregation [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:HPA": {}, + "Serial": {}, + "Slow": {}, + "sig-autoscaling": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-autoscaling] [Feature:HPA] Horizontal pod autoscaling (scale resource: Memory) [Serial] [Slow] Deployment (Container Resource) Should scale from 1 pod to 3 pods and then from 3 pods to 5 pods using Average Value for aggregation [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:HPA": {}, + "Serial": {}, + "Slow": {}, + "sig-autoscaling": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-autoscaling] [Feature:HPA] [Serial] [Slow] Horizontal pod autoscaling (non-default behavior) with short downscale stabilization window should scale down soon after the stabilization period [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:HPA": {}, + "Serial": {}, + "Slow": {}, + "sig-autoscaling": 
{} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-autoscaling] [Feature:HPA] [Serial] [Slow] Horizontal pod autoscaling (non-default behavior) with long upscale stabilization window should scale up only after the stabilization period [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:HPA": {}, + "Serial": {}, + "Slow": {}, + "sig-autoscaling": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-autoscaling] [Feature:HPA] [Serial] [Slow] Horizontal pod autoscaling (non-default behavior) with autoscaling disabled shouldn't scale up [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:HPA": {}, + "Serial": {}, + "Slow": {}, + "sig-autoscaling": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-autoscaling] [Feature:HPA] [Serial] [Slow] Horizontal pod autoscaling (non-default behavior) with autoscaling disabled shouldn't scale down [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:HPA": {}, + "Serial": {}, + "Slow": {}, + "sig-autoscaling": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-autoscaling] [Feature:HPA] [Serial] [Slow] Horizontal pod autoscaling (non-default behavior) with scale limited by number of Pods rate should scale up no more than given number of Pods per minute [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:HPA": {}, + "Serial": {}, + "Slow": {}, + "sig-autoscaling": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-autoscaling] [Feature:HPA] [Serial] [Slow] Horizontal pod autoscaling (non-default behavior) with scale limited by number of Pods rate should scale down no more than given number of Pods per minute [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:HPA": {}, + "Serial": {}, + "Slow": {}, + "sig-autoscaling": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-autoscaling] [Feature:HPA] [Serial] [Slow] Horizontal pod autoscaling (non-default behavior) with scale limited by percentage should scale up no more than given percentage of current Pods per minute [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:HPA": {}, + "Serial": {}, + "Slow": {}, + "sig-autoscaling": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-autoscaling] [Feature:HPA] [Serial] [Slow] 
Horizontal pod autoscaling (non-default behavior) with scale limited by percentage should scale down no more than given percentage of current Pods per minute [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:HPA": {}, + "Serial": {}, + "Slow": {}, + "sig-autoscaling": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-autoscaling] [Feature:HPA] [Serial] [Slow] Horizontal pod autoscaling (non-default behavior) with both scale up and down controls configured should keep recommendation within the range over two stabilization windows [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:HPA": {}, + "Serial": {}, + "Slow": {}, + "sig-autoscaling": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-autoscaling] [Feature:HPA] [Serial] [Slow] Horizontal pod autoscaling (non-default behavior) with both scale up and down controls configured should keep recommendation within the range with stabilization window and pod limit rate [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:HPA": {}, + "Serial": {}, + "Slow": {}, + "sig-autoscaling": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Conntrack should be able to preserve UDP traffic when server pod cycles for a NodePort service [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-network": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Conntrack should be able to preserve UDP traffic when server pod cycles for a ClusterIP service [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-network": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Conntrack should be able to preserve UDP traffic when server pod cycles for a ClusterIP service and client is hostNetwork [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-network": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Conntrack should be able to preserve UDP traffic when initial unready endpoints get ready [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-network": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": 
"[sig-network] Conntrack proxy implementation should not be vulnerable to the invalid conntrack state bug [Privileged] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-network": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] DNS should provide DNS for the cluster [Conformance] [Skipped:Proxy] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "sig-network": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] DNS should provide DNS for the cluster [Provider:GCE] [Skipped:Proxy] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-network": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] DNS should resolve DNS of partial qualified names for the cluster [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-network": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] DNS should provide /etc/hosts entries for the cluster [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "sig-network": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] DNS should provide DNS for services [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "sig-network": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] DNS should resolve DNS of partial qualified names for services [LinuxOnly] [Conformance] [Skipped:Proxy] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "sig-network": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] DNS should provide DNS for pods for Hostname [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "sig-network": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": 
"openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] DNS should provide DNS for pods for Subdomain [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "sig-network": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] DNS should provide DNS for ExternalName services [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "sig-network": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] DNS should support configurable pod DNS nameservers [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "sig-network": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] DNS should support configurable pod resolv.conf [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-network": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] DNS should work with the pod containing more than 6 DNS search paths and longer than 256 search list characters [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-network": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] DNS HostNetwork should resolve DNS of partial qualified names for services on hostNetwork pods with dnsPolicy: ClusterFirstWithHostNet [LinuxOnly] [Disabled:RebaseInProgress] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-network": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] DNS HostNetwork spec.Hostname field is not silently ignored and is used for hostname for a Pod [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-network": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] DNS HostNetwork spec.Hostname field is silently ignored and the node hostname is used when hostNetwork is set to true for a Pod [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-network": {} + }, + "tags": null, + "resources": { + 
"isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] DNS configMap nameserver Change stubDomain should be able to change stubDomain configuration [Slow] [Serial] [Disabled:SpecialConfig] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "Slow": {}, + "sig-network": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] DNS configMap nameserver Forward PTR lookup should forward PTR records lookup to upstream nameserver [Slow] [Serial] [Disabled:SpecialConfig] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "Slow": {}, + "sig-network": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] DNS configMap nameserver Forward external name lookup should forward externalname lookup to upstream nameserver [Slow] [Serial] [Disabled:SpecialConfig] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "Slow": {}, + "sig-network": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] [Feature:PerformanceDNS] [Serial] Should answer DNS query for maximum number of services per cluster [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:PerformanceDNS": {}, + "Serial": {}, + "sig-network": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] [Feature:IPv6DualStack] should have ipv4 and ipv6 internal node ip [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:IPv6DualStack": {}, + "sig-network": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] [Feature:IPv6DualStack] should create pod, add ipv6 and ipv4 ip to pod ips [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:IPv6DualStack": {}, + "sig-network": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] [Feature:IPv6DualStack] should create pod, add ipv6 and ipv4 ip to host ips [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:IPv6DualStack": {}, + "sig-network": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": 
"[sig-network] [Feature:IPv6DualStack] should be able to reach pod on ipv4 and ipv6 ip [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:IPv6DualStack": {}, + "sig-network": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] [Feature:IPv6DualStack] should create a single stack service with cluster ip from primary service range [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:IPv6DualStack": {}, + "sig-network": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] [Feature:IPv6DualStack] should create service with ipv4 cluster ip [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:IPv6DualStack": {}, + "sig-network": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] [Feature:IPv6DualStack] should create service with ipv6 cluster ip [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:IPv6DualStack": {}, + "sig-network": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] [Feature:IPv6DualStack] should create service with ipv4,v6 cluster ip [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:IPv6DualStack": {}, + "sig-network": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] [Feature:IPv6DualStack] should create service with ipv6,v4 cluster ip [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:IPv6DualStack": {}, + "sig-network": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] [Feature:IPv6DualStack] Granular Checks: Services Secondary IP Family [LinuxOnly] should function for pod-Service: http [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:IPv6DualStack": {}, + "sig-network": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] [Feature:IPv6DualStack] Granular Checks: Services Secondary IP Family [LinuxOnly] should function for pod-Service: udp [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + 
"Feature:IPv6DualStack": {}, + "sig-network": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] [Feature:IPv6DualStack] Granular Checks: Services Secondary IP Family [LinuxOnly] should function for pod-Service: sctp [Feature:SCTPConnectivity] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:IPv6DualStack": {}, + "Feature:SCTPConnectivity": {}, + "sig-network": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] [Feature:IPv6DualStack] Granular Checks: Services Secondary IP Family [LinuxOnly] should function for node-Service: http [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:IPv6DualStack": {}, + "sig-network": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] [Feature:IPv6DualStack] Granular Checks: Services Secondary IP Family [LinuxOnly] should function for node-Service: udp [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:IPv6DualStack": {}, + "sig-network": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] [Feature:IPv6DualStack] Granular Checks: Services Secondary IP Family [LinuxOnly] should function for endpoint-Service: http [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:IPv6DualStack": {}, + "sig-network": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] [Feature:IPv6DualStack] Granular Checks: Services Secondary IP Family [LinuxOnly] should function for endpoint-Service: udp [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:IPv6DualStack": {}, + "sig-network": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] [Feature:IPv6DualStack] Granular Checks: Services Secondary IP Family [LinuxOnly] should update endpoints: http [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:IPv6DualStack": {}, + "sig-network": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] [Feature:IPv6DualStack] Granular Checks: Services Secondary IP Family [LinuxOnly] should update endpoints: udp 
[Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:IPv6DualStack": {}, + "sig-network": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] [Feature:IPv6DualStack] Granular Checks: Services Secondary IP Family [LinuxOnly] should function for client IP based session affinity: http [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:IPv6DualStack": {}, + "sig-network": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] [Feature:IPv6DualStack] Granular Checks: Services Secondary IP Family [LinuxOnly] should function for client IP based session affinity: udp [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:IPv6DualStack": {}, + "sig-network": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] [Feature:IPv6DualStack] Granular Checks: Services Secondary IP Family [LinuxOnly] should be able to handle large requests: http [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:IPv6DualStack": {}, + "sig-network": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] [Feature:IPv6DualStack] Granular Checks: Services Secondary IP Family [LinuxOnly] should be able to handle large requests: udp [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:IPv6DualStack": {}, + "sig-network": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] [Feature:IPv6DualStack] Granular Checks: Services Secondary IP Family [LinuxOnly] should function for service endpoints using hostNetwork [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:IPv6DualStack": {}, + "sig-network": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] EndpointSlice should have Endpoints and EndpointSlices pointing to API Server [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "sig-network": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] EndpointSlice should create 
and delete Endpoints and EndpointSlices for a Service with a selector specified [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "sig-network": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] EndpointSlice should create Endpoints and EndpointSlices for Pods matching a Service [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "sig-network": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] EndpointSlice should support creating EndpointSlice API operations [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "sig-network": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] EndpointSlice should support a Service with multiple ports specified in multiple EndpointSlices [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-network": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] EndpointSlice should support a Service with multiple endpoint IPs specified in multiple EndpointSlices [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-network": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] EndpointSliceMirroring should mirror a custom Endpoints resource through create update and delete [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "sig-network": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] EndpointSliceMirroring should mirror a custom Endpoint with multiple subsets and same IP address [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-network": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] ClusterDns [Feature:Example] should create pod that uses dns [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Example": {}, + "sig-network": {} + }, + "tags": null, + "resources": { + "isolation": { + 
"mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] CVE-2021-29923 IPv4 Service Type ClusterIP with leading zeros should work interpreted as decimal [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-network": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] HostPort validates that there is no conflict between pods with same hostPort but different hostIP and protocol [LinuxOnly] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "sig-network": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Ingress API should support creating Ingress API operations [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "sig-network": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] IngressClass [Feature:Ingress] should set default value on new IngressClass [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Ingress": {}, + "Serial": {}, + "sig-network": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] IngressClass [Feature:Ingress] should not set default value if no default IngressClass [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Ingress": {}, + "Serial": {}, + "sig-network": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] IngressClass [Feature:Ingress] should choose the one with the later CreationTimestamp, if equal the one with the lower name when two ingressClasses are marked as default [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Ingress": {}, + "Serial": {}, + "sig-network": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] IngressClass [Feature:Ingress] should allow IngressClass to have Namespace-scoped parameters [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Ingress": {}, + "Serial": {}, + "sig-network": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", 
+ "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] IngressClass API should support creating IngressClass API operations [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "sig-network": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] KubeProxy should set TCP CLOSE_WAIT timeout [Privileged] [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-network": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] KubeProxy should update metric for tracking accepted packets destined for localhost nodeports [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-network": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] LoadBalancers ExternalTrafficPolicy: Local [Feature:LoadBalancer] [Slow] should work for type=LoadBalancer [Skipped:alibabacloud] [Skipped:baremetal] [Skipped:ibmcloud] [Skipped:kubevirt] [Skipped:nutanix] [Skipped:openstack] [Skipped:ovirt] [Skipped:vsphere] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:LoadBalancer": {}, + "Slow": {}, + "sig-network": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] LoadBalancers ExternalTrafficPolicy: Local [Feature:LoadBalancer] [Slow] should only target nodes with endpoints [Skipped:alibabacloud] [Skipped:baremetal] [Skipped:ibmcloud] [Skipped:kubevirt] [Skipped:nutanix] [Skipped:openstack] [Skipped:ovirt] [Skipped:vsphere] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:LoadBalancer": {}, + "Slow": {}, + "sig-network": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] LoadBalancers ExternalTrafficPolicy: Local [Feature:LoadBalancer] [Slow] should work from pods [Skipped:alibabacloud] [Skipped:baremetal] [Skipped:ibmcloud] [Skipped:kubevirt] [Skipped:nutanix] [Skipped:openstack] [Skipped:ovirt] [Skipped:vsphere] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:LoadBalancer": {}, + "Slow": {}, + "sig-network": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] LoadBalancers ExternalTrafficPolicy: Local [Feature:LoadBalancer] [Slow] should handle updates to ExternalTrafficPolicy field [Skipped:alibabacloud] [Skipped:baremetal] [Skipped:ibmcloud] [Skipped:kubevirt] 
[Skipped:nutanix] [Skipped:openstack] [Skipped:ovirt] [Skipped:vsphere] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:LoadBalancer": {}, + "Slow": {}, + "sig-network": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] NoSNAT Should be able to send traffic between Pods without SNAT [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-network": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Connectivity Pod Lifecycle should be able to connect from a Pod to a terminating Pod [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-network": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Connectivity Pod Lifecycle should be able to connect to other Pod from a terminating Pod [Disabled:RebaseInProgress] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-network": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Proxy version v1 should proxy logs on node with explicit kubelet port using proxy subresource [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-network": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Proxy version v1 should proxy logs on node using proxy subresource [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-network": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Proxy version v1 should proxy through a service and a pod [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "sig-network": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Proxy version v1 A set of valid responses are returned for both pod and service ProxyWithPath [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "sig-network": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, 
+ { + "name": "[sig-network] Proxy version v1 A set of valid responses are returned for both pod and service Proxy [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "sig-network": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] LoadBalancers [Feature:LoadBalancer] should be able to change the type and ports of a TCP service [Slow] [Skipped:alibabacloud] [Skipped:baremetal] [Skipped:ibmcloud] [Skipped:kubevirt] [Skipped:nutanix] [Skipped:openstack] [Skipped:ovirt] [Skipped:vsphere] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:LoadBalancer": {}, + "Slow": {}, + "sig-network": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] LoadBalancers [Feature:LoadBalancer] should be able to change the type and ports of a UDP service [Slow] [Skipped:alibabacloud] [Skipped:aws] [Skipped:baremetal] [Skipped:ibmcloud] [Skipped:kubevirt] [Skipped:nutanix] [Skipped:openstack] [Skipped:ovirt] [Skipped:vsphere] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:LoadBalancer": {}, + "Slow": {}, + "sig-network": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] LoadBalancers [Feature:LoadBalancer] should only allow access from service loadbalancer source ranges [Slow] [Skipped:alibabacloud] [Skipped:baremetal] [Skipped:ibmcloud] [Skipped:kubevirt] [Skipped:nutanix] [Skipped:openstack] [Skipped:ovirt] [Skipped:vsphere] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:LoadBalancer": {}, + "Slow": {}, + "sig-network": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] LoadBalancers [Feature:LoadBalancer] should have session affinity work for LoadBalancer service with Local traffic policy [Slow] [LinuxOnly] [Skipped:alibabacloud] [Skipped:aws] [Skipped:baremetal] [Skipped:ibmcloud] [Skipped:kubevirt] [Skipped:nutanix] [Skipped:openstack] [Skipped:ovirt] [Skipped:vsphere] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:LoadBalancer": {}, + "Slow": {}, + "sig-network": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] LoadBalancers [Feature:LoadBalancer] should be able to switch session affinity for LoadBalancer service with Local traffic policy [Slow] [LinuxOnly] [Skipped:alibabacloud] [Skipped:aws] [Skipped:baremetal] [Skipped:ibmcloud] [Skipped:kubevirt] [Skipped:nutanix] [Skipped:openstack] [Skipped:ovirt] [Skipped:vsphere] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:LoadBalancer": {}, + "Slow": {}, + "sig-network": {} + }, + "tags": null, + "resources": { + 
"isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] LoadBalancers [Feature:LoadBalancer] should have session affinity work for LoadBalancer service with Cluster traffic policy [Slow] [LinuxOnly] [Skipped:alibabacloud] [Skipped:aws] [Skipped:baremetal] [Skipped:ibmcloud] [Skipped:kubevirt] [Skipped:nutanix] [Skipped:openstack] [Skipped:ovirt] [Skipped:vsphere] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:LoadBalancer": {}, + "Slow": {}, + "sig-network": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] LoadBalancers [Feature:LoadBalancer] should be able to switch session affinity for LoadBalancer service with Cluster traffic policy [Slow] [LinuxOnly] [Skipped:alibabacloud] [Skipped:aws] [Skipped:baremetal] [Skipped:ibmcloud] [Skipped:kubevirt] [Skipped:nutanix] [Skipped:openstack] [Skipped:ovirt] [Skipped:vsphere] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:LoadBalancer": {}, + "Slow": {}, + "sig-network": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] LoadBalancers [Feature:LoadBalancer] should handle load balancer cleanup finalizer for service [Slow] [Skipped:alibabacloud] [Skipped:baremetal] [Skipped:ibmcloud] [Skipped:kubevirt] [Skipped:nutanix] [Skipped:openstack] [Skipped:ovirt] [Skipped:vsphere] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:LoadBalancer": {}, + "Slow": {}, + "sig-network": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] LoadBalancers [Feature:LoadBalancer] should be able to create LoadBalancer Service without NodePort and change it [Slow] [Skipped:alibabacloud] [Skipped:baremetal] [Skipped:ibmcloud] [Skipped:kubevirt] [Skipped:nutanix] [Skipped:openstack] [Skipped:ovirt] [Skipped:vsphere] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:LoadBalancer": {}, + "Slow": {}, + "sig-network": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] LoadBalancers [Feature:LoadBalancer] should be able to preserve UDP traffic when server pod cycles for a LoadBalancer service on different nodes [Skipped:alibabacloud] [Skipped:aws] [Skipped:baremetal] [Skipped:ibmcloud] [Skipped:kubevirt] [Skipped:nutanix] [Skipped:openstack] [Skipped:ovirt] [Skipped:vsphere] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:LoadBalancer": {}, + "sig-network": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] LoadBalancers 
[Feature:LoadBalancer] should be able to preserve UDP traffic when server pod cycles for a LoadBalancer service on the same nodes [Skipped:alibabacloud] [Skipped:aws] [Skipped:baremetal] [Skipped:ibmcloud] [Skipped:kubevirt] [Skipped:nutanix] [Skipped:openstack] [Skipped:ovirt] [Skipped:vsphere] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:LoadBalancer": {}, + "sig-network": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] LoadBalancers [Feature:LoadBalancer] should not have connectivity disruption during rolling update with externalTrafficPolicy=Cluster [Slow] [Skipped:alibabacloud] [Skipped:baremetal] [Skipped:ibmcloud] [Skipped:kubevirt] [Skipped:nutanix] [Skipped:openstack] [Skipped:ovirt] [Skipped:vsphere] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:LoadBalancer": {}, + "Slow": {}, + "sig-network": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] LoadBalancers [Feature:LoadBalancer] should not have connectivity disruption during rolling update with externalTrafficPolicy=Local [Slow] [Skipped:alibabacloud] [Skipped:baremetal] [Skipped:ibmcloud] [Skipped:kubevirt] [Skipped:nutanix] [Skipped:openstack] [Skipped:ovirt] [Skipped:vsphere] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:LoadBalancer": {}, + "Slow": {}, + "sig-network": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Services should provide secure master service [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "sig-network": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Services should serve a basic endpoint from pods [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "sig-network": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Services should serve multiport endpoints from pods [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "sig-network": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Services should be updated after adding or deleting ports [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-network": {} + }, + "tags": null, + 
"resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Services should preserve source pod IP for traffic thru service cluster IP [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-network": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Services should allow pods to hairpin back to themselves through services [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-network": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Services should be able to up and down services [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-network": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Services should work after the service has been recreated [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-network": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Services should work after restarting kube-proxy [Disruptive] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "sig-network": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Services should work after restarting apiserver [Disruptive] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "sig-network": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Services should be able to create a functioning NodePort service [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "sig-network": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Services should be possible to connect to a service via ExternalIP when the external IP is not assigned to a node [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-network": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", 
+ "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Services should be able to update service type to NodePort listening on same port number but different protocols [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-network": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Services should be able to change the type from ExternalName to ClusterIP [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "sig-network": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Services should be able to change the type from ExternalName to NodePort [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "sig-network": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Services should be able to change the type from ClusterIP to ExternalName [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "sig-network": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Services should be able to change the type from NodePort to ExternalName [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "sig-network": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Services should prevent NodePort collisions [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-network": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Services should check NodePort out-of-range [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-network": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Services should release NodePorts on delete [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-network": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + 
}, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Services should create endpoints for unready pods [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-network": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Services should be able to connect to terminating and unready endpoints if PublishNotReadyAddresses is true [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-network": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Services should not be able to connect to terminating and unready endpoints if PublishNotReadyAddresses is false [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-network": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Services should have session affinity work for service with type clusterIP [LinuxOnly] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "sig-network": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Services should have session affinity timeout work for service with type clusterIP [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-network": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Services should be able to switch session affinity for service with type clusterIP [LinuxOnly] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "sig-network": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Services should have session affinity work for NodePort service [LinuxOnly] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "sig-network": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Services should have session affinity timeout work for NodePort service [LinuxOnly] 
[Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-network": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Services should be able to switch session affinity for NodePort service [LinuxOnly] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "sig-network": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Services should implement service.kubernetes.io/service-proxy-name [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-network": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Services should implement service.kubernetes.io/headless [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-network": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Services should be rejected when no endpoints exist [Skipped:ibmroks] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-network": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Services should be rejected for evicted pods (no endpoints exist) [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-network": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Services should respect internalTrafficPolicy=Local Pod to Pod [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-network": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Services should respect internalTrafficPolicy=Local Pod (hostNetwork: true) to Pod [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-network": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Services should respect internalTrafficPolicy=Local Pod and Node, to Pod (hostNetwork: true) [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + 
"sig-network": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Services should support externalTrafficPolicy=Local for type=NodePort [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-network": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Services should fail health check node port if there are only terminating endpoints [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-network": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Services should fallback to terminating endpoints when there are no ready endpoints with internalTrafficPolicy=Cluster [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-network": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Services should fallback to local terminating endpoints when there are no ready endpoints with internalTrafficPolicy=Local [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-network": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Services should fallback to terminating endpoints when there are no ready endpoints with externallTrafficPolicy=Cluster [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-network": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Services should fallback to local terminating endpoints when there are no ready endpoints with externalTrafficPolicy=Local [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-network": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Services should find a service from listing all namespaces [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "sig-network": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Services should test the 
lifecycle of an Endpoint [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "sig-network": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Services should complete a service status lifecycle [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "sig-network": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Services should delete a collection of services [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "sig-network": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Services should serve endpoints on same port and different protocols [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "sig-network": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Services should serve endpoints on same port and different protocol for internal traffic on Type LoadBalancer [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-network": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Services should allow creating a basic SCTP service with pod and endpoints [LinuxOnly] [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-network": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] [Feature:ServiceCIDRs] should create Services and servce on different Service CIDRs [Disabled:Alpha] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:ServiceCIDRs": {}, + "sig-network": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Service endpoints latency should not be very high [Conformance] [Serial] [Suite:openshift/conformance/serial/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "sig-network": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, 
+ "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] [Feature:Topology Hints] should distribute endpoints evenly [Disabled:SpecialConfig] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Topology Hints": {}, + "sig-network": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] [Feature:Traffic Distribution] when Service has trafficDistribution=PreferClose should route traffic to an endpoint that is close to the client [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Traffic Distribution": {}, + "sig-network": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Networking should provide Internet connection for containers [Feature:Networking-IPv4] [Skipped:Disconnected] [Skipped:Proxy] [Skipped:azure] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Networking-IPv4": {}, + "sig-network": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Networking should provide Internet connection for containers [Feature:Networking-IPv6] [Experimental][LinuxOnly] [Disabled:Broken] [Skipped:Disconnected] [Skipped:Proxy] [Skipped:azure] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Networking-IPv6": {}, + "sig-network": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Networking should provider Internet connection for containers using DNS [Feature:Networking-DNS] [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Networking-DNS": {}, + "sig-network": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Networking should provide unchanging, static URL paths for kubernetes api services [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-network": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Networking should check kube-proxy urls [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-network": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Networking Granular Checks: Services should function for pod-Service: http [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": 
null, + "labels": { + "sig-network": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Networking Granular Checks: Services should function for pod-Service: udp [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-network": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Networking Granular Checks: Services should function for pod-Service: sctp [Feature:SCTPConnectivity] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:SCTPConnectivity": {}, + "sig-network": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Networking Granular Checks: Services should function for node-Service: http [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-network": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Networking Granular Checks: Services should function for node-Service: udp [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-network": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Networking Granular Checks: Services should function for node-Service: sctp [Feature:SCTPConnectivity] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:SCTPConnectivity": {}, + "sig-network": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Networking Granular Checks: Services should function for endpoint-Service: http [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-network": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Networking Granular Checks: Services should function for endpoint-Service: udp [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-network": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Networking Granular Checks: Services should function for endpoint-Service: sctp 
[Feature:SCTPConnectivity] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:SCTPConnectivity": {}, + "sig-network": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Networking Granular Checks: Services should function for multiple endpoint-Services with same selector [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-network": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Networking Granular Checks: Services should update endpoints: http [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-network": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Networking Granular Checks: Services should update endpoints: udp [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-network": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Networking Granular Checks: Services should update nodePort: http [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-network": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Networking Granular Checks: Services should support basic nodePort: udp functionality [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-network": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Networking Granular Checks: Services should update nodePort: udp [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-network": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Networking Granular Checks: Services should function for client IP based session affinity: http [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-network": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Networking Granular Checks: Services should function for client IP based session 
affinity: udp [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-network": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Networking Granular Checks: Services should be able to handle large requests: http [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-network": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Networking Granular Checks: Services should be able to handle large requests: udp [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-network": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Networking Granular Checks: Services should function for service endpoints using hostNetwork [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-network": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Networking should recreate its iptables rules if they are deleted [Disruptive] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "sig-network": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Networking should allow creating a Pod with an SCTP HostPort [LinuxOnly] [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-network": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] Networking IPerf2 [Feature:Networking-Performance] should run iperf2 [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Networking-Performance": {}, + "sig-network": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] AppArmor load AppArmor profiles should enforce an AppArmor profile specified on the pod [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] AppArmor load AppArmor profiles should enforce an AppArmor 
profile specified on the container [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] AppArmor load AppArmor profiles should enforce an AppArmor profile specified in annotations [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] AppArmor load AppArmor profiles can disable an AppArmor profile, using unconfined [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Events should be sent by kubelets and the scheduler about pods scheduling and running [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] [Feature:Example] Liveness liveness pods should be automatically restarted [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Example": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] [Feature:Example] Secret should create a pod that reads a secret [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Example": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] [Feature:Example] Downward API should create a pod that prints its name and namespace [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Example": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] kubelet Clean up pods on node kubelet should be able to delete 10 pods per node in 1m0s.
[Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] kubelet host cleanup with volume mounts [HostCleanup] [Flaky] Host cleanup after disrupting NFS volume [NFS] after stopping the nfs-server and deleting the (sleeping) client pod, the NFS mount and the pod's UID directory should be removed. [Suite:k8s]", + "otherNames": null, + "labels": { + "Flaky": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] kubelet host cleanup with volume mounts [HostCleanup] [Flaky] Host cleanup after disrupting NFS volume [NFS] after stopping the nfs-server and deleting the (active) client pod, the NFS mount and the pod's UID directory should be removed. [Suite:k8s]", + "otherNames": null, + "labels": { + "Flaky": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] kubelet kubectl get --raw \"/api/v1/nodes/\u003cinsert-node-name-here\u003e/proxy/logs/?query=/\u003cinsert-log-file-name-here\u003e [Feature:NodeLogQuery] should return the error with an empty --query option [Disabled:SpecialConfig] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:NodeLogQuery": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] kubelet kubectl get --raw \"/api/v1/nodes/\u003cinsert-node-name-here\u003e/proxy/logs/?query=/\u003cinsert-log-file-name-here\u003e [Feature:NodeLogQuery] should return the kubelet logs [Disabled:SpecialConfig] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:NodeLogQuery": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] kubelet kubectl get --raw \"/api/v1/nodes/\u003cinsert-node-name-here\u003e/proxy/logs/?query=/\u003cinsert-log-file-name-here\u003e [Feature:NodeLogQuery] should return the kubelet logs for the current boot [Disabled:SpecialConfig] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:NodeLogQuery": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] kubelet kubectl get --raw \"/api/v1/nodes/\u003cinsert-node-name-here\u003e/proxy/logs/?query=/\u003cinsert-log-file-name-here\u003e [Feature:NodeLogQuery] should return the last three lines of the kubelet logs [Disabled:SpecialConfig] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:NodeLogQuery": {}, + "sig-node": {} + }, + 
"tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] kubelet kubectl get --raw \"/api/v1/nodes/\u003cinsert-node-name-here\u003e/proxy/logs/?query=/\u003cinsert-log-file-name-here\u003e [Feature:NodeLogQuery] should return the kubelet logs for the current boot with the pattern container [Disabled:SpecialConfig] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:NodeLogQuery": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] kubelet kubectl get --raw \"/api/v1/nodes/\u003cinsert-node-name-here\u003e/proxy/logs/?query=/\u003cinsert-log-file-name-here\u003e [Feature:NodeLogQuery] should return the kubelet logs since the current date and time [Disabled:SpecialConfig] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:NodeLogQuery": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] kubelet kubectl get --raw \"/api/v1/nodes/\u003cinsert-node-name-here\u003e/proxy/logs/?query=/\u003cinsert-log-file-name-here\u003e [Feature:NodeLogQuery] should return the Microsoft-Windows-Security-SPP logs [Disabled:SpecialConfig] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:NodeLogQuery": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] kubelet kubectl get --raw \"/api/v1/nodes/\u003cinsert-node-name-here\u003e/proxy/logs/?query=/\u003cinsert-log-file-name-here\u003e [Feature:NodeLogQuery] should return the last three lines of the Microsoft-Windows-Security-SPP logs [Disabled:SpecialConfig] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:NodeLogQuery": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] kubelet kubectl get --raw \"/api/v1/nodes/\u003cinsert-node-name-here\u003e/proxy/logs/?query=/\u003cinsert-log-file-name-here\u003e [Feature:NodeLogQuery] should return the Microsoft-Windows-Security-SPP logs with the pattern Health [Disabled:SpecialConfig] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:NodeLogQuery": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Kubelet [Serial] [Slow] regular resource usage tracking [Feature:RegularResourceUsageTracking] resource tracking for 0 pods per node [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:RegularResourceUsageTracking": {}, + "Serial": {}, + "Slow": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { 
+ "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Kubelet [Serial] [Slow] regular resource usage tracking [Feature:RegularResourceUsageTracking] resource tracking for 100 pods per node [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:RegularResourceUsageTracking": {}, + "Serial": {}, + "Slow": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Kubelet [Serial] [Slow] experimental resource usage tracking [Feature:ExperimentalResourceUsageTracking] resource tracking for 100 pods per node [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:ExperimentalResourceUsageTracking": {}, + "Serial": {}, + "Slow": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Mount propagation should propagate mounts within defined scopes [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] NodeProblemDetector [NodeFeature:NodeProblemDetector] should run without error [Disabled:SpecialConfig] [Suite:k8s]", + "otherNames": null, + "labels": { + "NodeFeature:NodeProblemDetector": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Pod garbage collector [Feature:PodGarbageCollector] [Slow] should handle the creation of 1000 pods [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:PodGarbageCollector": {}, + "Slow": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] [Serial] Pod InPlace Resize Container (scheduler-focused) [Feature:InPlacePodVerticalScaling] pod-resize-scheduler-tests [Disabled:Alpha] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:InPlacePodVerticalScaling": {}, + "Serial": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Pod InPlace Resize Container [Feature:InPlacePodVerticalScaling] Guaranteed QoS pod, one container - increase CPU \u0026 memory [Disabled:Alpha] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:InPlacePodVerticalScaling": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": 
"openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Pod InPlace Resize Container [Feature:InPlacePodVerticalScaling] Guaranteed QoS pod, one container - decrease CPU \u0026 memory [Disabled:Alpha] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:InPlacePodVerticalScaling": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Pod InPlace Resize Container [Feature:InPlacePodVerticalScaling] Guaranteed QoS pod, one container - increase CPU \u0026 decrease memory [Disabled:Alpha] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:InPlacePodVerticalScaling": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Pod InPlace Resize Container [Feature:InPlacePodVerticalScaling] Guaranteed QoS pod, one container - decrease CPU \u0026 increase memory [Disabled:Alpha] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:InPlacePodVerticalScaling": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Pod InPlace Resize Container [Feature:InPlacePodVerticalScaling] Guaranteed QoS pod, three containers (c1, c2, c3) - increase: CPU (c1,c3), memory (c2) ; decrease: CPU (c2), memory (c1,c3) [Disabled:Alpha] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:InPlacePodVerticalScaling": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Pod InPlace Resize Container [Feature:InPlacePodVerticalScaling] Burstable QoS pod, one container with cpu \u0026 memory requests + limits - decrease memory requests only [Disabled:Alpha] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:InPlacePodVerticalScaling": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Pod InPlace Resize Container [Feature:InPlacePodVerticalScaling] Burstable QoS pod, one container with cpu \u0026 memory requests + limits - decrease memory limits only [Disabled:Alpha] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:InPlacePodVerticalScaling": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Pod InPlace Resize Container [Feature:InPlacePodVerticalScaling] Burstable QoS pod, one container with cpu \u0026 memory requests + limits - increase memory requests only [Disabled:Alpha] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:InPlacePodVerticalScaling": {}, + 
"sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Pod InPlace Resize Container [Feature:InPlacePodVerticalScaling] Burstable QoS pod, one container with cpu \u0026 memory requests + limits - increase memory limits only [Disabled:Alpha] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:InPlacePodVerticalScaling": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Pod InPlace Resize Container [Feature:InPlacePodVerticalScaling] Burstable QoS pod, one container with cpu \u0026 memory requests + limits - decrease CPU requests only [Disabled:Alpha] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:InPlacePodVerticalScaling": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Pod InPlace Resize Container [Feature:InPlacePodVerticalScaling] Burstable QoS pod, one container with cpu \u0026 memory requests + limits - decrease CPU limits only [Disabled:Alpha] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:InPlacePodVerticalScaling": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Pod InPlace Resize Container [Feature:InPlacePodVerticalScaling] Burstable QoS pod, one container with cpu \u0026 memory requests + limits - increase CPU requests only [Disabled:Alpha] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:InPlacePodVerticalScaling": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Pod InPlace Resize Container [Feature:InPlacePodVerticalScaling] Burstable QoS pod, one container with cpu \u0026 memory requests + limits - increase CPU limits only [Disabled:Alpha] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:InPlacePodVerticalScaling": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Pod InPlace Resize Container [Feature:InPlacePodVerticalScaling] Burstable QoS pod, one container with cpu \u0026 memory requests + limits - decrease CPU requests and limits [Disabled:Alpha] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:InPlacePodVerticalScaling": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Pod InPlace Resize Container 
[Feature:InPlacePodVerticalScaling] Burstable QoS pod, one container with cpu \u0026 memory requests + limits - increase CPU requests and limits [Disabled:Alpha] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:InPlacePodVerticalScaling": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Pod InPlace Resize Container [Feature:InPlacePodVerticalScaling] Burstable QoS pod, one container with cpu \u0026 memory requests + limits - decrease CPU requests and increase CPU limits [Disabled:Alpha] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:InPlacePodVerticalScaling": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Pod InPlace Resize Container [Feature:InPlacePodVerticalScaling] Burstable QoS pod, one container with cpu \u0026 memory requests + limits - increase CPU requests and decrease CPU limits [Disabled:Alpha] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:InPlacePodVerticalScaling": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Pod InPlace Resize Container [Feature:InPlacePodVerticalScaling] Burstable QoS pod, one container with cpu \u0026 memory requests + limits - decrease memory requests and limits [Disabled:Alpha] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:InPlacePodVerticalScaling": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Pod InPlace Resize Container [Feature:InPlacePodVerticalScaling] Burstable QoS pod, one container with cpu \u0026 memory requests + limits - increase memory requests and limits [Disabled:Alpha] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:InPlacePodVerticalScaling": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Pod InPlace Resize Container [Feature:InPlacePodVerticalScaling] Burstable QoS pod, one container with cpu \u0026 memory requests + limits - decrease memory requests and increase memory limits [Disabled:Alpha] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:InPlacePodVerticalScaling": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Pod InPlace Resize Container [Feature:InPlacePodVerticalScaling] Burstable QoS pod, one container with cpu \u0026 memory requests + limits - increase memory requests and decrease memory limits [Disabled:Alpha] [Suite:k8s]", + 
"otherNames": null, + "labels": { + "Feature:InPlacePodVerticalScaling": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Pod InPlace Resize Container [Feature:InPlacePodVerticalScaling] Burstable QoS pod, one container with cpu \u0026 memory requests + limits - decrease CPU requests and increase memory limits [Disabled:Alpha] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:InPlacePodVerticalScaling": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Pod InPlace Resize Container [Feature:InPlacePodVerticalScaling] Burstable QoS pod, one container with cpu \u0026 memory requests + limits - increase CPU requests and decrease memory limits [Disabled:Alpha] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:InPlacePodVerticalScaling": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Pod InPlace Resize Container [Feature:InPlacePodVerticalScaling] Burstable QoS pod, one container with cpu \u0026 memory requests + limits - decrease memory requests and increase CPU limits [Disabled:Alpha] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:InPlacePodVerticalScaling": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Pod InPlace Resize Container [Feature:InPlacePodVerticalScaling] Burstable QoS pod, one container with cpu \u0026 memory requests + limits - increase memory requests and decrease CPU limits [Disabled:Alpha] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:InPlacePodVerticalScaling": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Pod InPlace Resize Container [Feature:InPlacePodVerticalScaling] Burstable QoS pod, one container with cpu \u0026 memory requests - decrease memory request [Disabled:Alpha] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:InPlacePodVerticalScaling": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Pod InPlace Resize Container [Feature:InPlacePodVerticalScaling] Guaranteed QoS pod, one container - increase CPU (NotRequired) \u0026 memory (RestartContainer) [Disabled:Alpha] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:InPlacePodVerticalScaling": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + 
"timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Pod InPlace Resize Container [Feature:InPlacePodVerticalScaling] Burstable QoS pod, one container - decrease CPU (RestartContainer) \u0026 memory (NotRequired) [Disabled:Alpha] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:InPlacePodVerticalScaling": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Pod InPlace Resize Container [Feature:InPlacePodVerticalScaling] Burstable QoS pod, three containers - increase c1 resources, no change for c2, decrease c3 resources (no net change for pod) [Disabled:Alpha] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:InPlacePodVerticalScaling": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Pod InPlace Resize Container [Feature:InPlacePodVerticalScaling] Burstable QoS pod, three containers - decrease c1 resources, increase c2 resources, no change for c3 (net increase for pod) [Disabled:Alpha] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:InPlacePodVerticalScaling": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Pod InPlace Resize Container [Feature:InPlacePodVerticalScaling] Burstable QoS pod, three containers - no change for c1, increase c2 resources, decrease c3 (net decrease for pod) [Disabled:Alpha] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:InPlacePodVerticalScaling": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Pod InPlace Resize Container [Feature:InPlacePodVerticalScaling] pod-resize-resource-quota-test [Disabled:Alpha] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:InPlacePodVerticalScaling": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Pod InPlace Resize Container [Feature:InPlacePodVerticalScaling] BestEffort pod - try requesting memory, expect error [Disabled:Alpha] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:InPlacePodVerticalScaling": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Pods Extended Delete Grace Period should be submitted and removed [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + 
"conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Pods Extended Pods Set QOS Class should be set on Pods with matching resource requests and limits for memory and cpu [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Pods Extended Pod Container Status should never report success for a pending container [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Pods Extended Pod Container Status should never report container start when an init container fails [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Pods Extended Pod Container lifecycle should not create extra sandbox if all containers are done [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Pods Extended Pod Container lifecycle evicted pods should be terminal [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Pods Extended Pod TerminationGracePeriodSeconds is negative pod with negative grace period [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] PreStop should call prestop when killing a pod [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] PreStop graceful pod terminated should wait until preStop hook completes the process [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-node": {} + }, + 
"tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] RuntimeClass should reject a Pod requesting a RuntimeClass with conflicting node selector [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] RuntimeClass should run a Pod requesting a RuntimeClass with scheduling with taints [Serial] [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] RuntimeClass should run a Pod requesting a RuntimeClass with scheduling without taints [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Security Context should support pod.Spec.SecurityContext.SupplementalGroups [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Security Context when if the container's primary UID belongs to some groups in the image [LinuxOnly] should add pod.Spec.SecurityContext.SupplementalGroups to them [LinuxOnly] in resultant supplementary groups for the container processes [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Security Context [sig-node] SupplementalGroupsPolicy [Feature:SupplementalGroupsPolicy] when SupplementalGroupsPolicy was not set if the container's primary UID belongs to some groups in the image, it should add SupplementalGroups to them [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:SupplementalGroupsPolicy": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Security Context [sig-node] SupplementalGroupsPolicy [Feature:SupplementalGroupsPolicy] when SupplementalGroupsPolicy was set to Merge if the container's primary UID belongs to some groups in the image, it should add SupplementalGroups to them [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + 
"Feature:SupplementalGroupsPolicy": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Security Context [sig-node] SupplementalGroupsPolicy [Feature:SupplementalGroupsPolicy] when SupplementalGroupsPolicy was set to Strict even if the container's primary UID belongs to some groups in the image, it should not add SupplementalGroups to them [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:SupplementalGroupsPolicy": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Security Context should support pod.Spec.SecurityContext.RunAsUser [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Security Context should support pod.Spec.SecurityContext.RunAsUser And pod.Spec.SecurityContext.RunAsGroup [LinuxOnly] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Security Context should support container.SecurityContext.RunAsUser [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Security Context should support container.SecurityContext.RunAsUser And container.SecurityContext.RunAsGroup [LinuxOnly] [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Security Context should support volume SELinux relabeling [Flaky] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Flaky": {}, + "LinuxOnly": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Security Context should support volume SELinux relabeling when using hostIPC [Flaky] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Flaky": {}, + "LinuxOnly": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + 
"mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Security Context should support volume SELinux relabeling when using hostPID [Flaky] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Flaky": {}, + "LinuxOnly": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Security Context should support seccomp unconfined on the container [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Security Context should support seccomp unconfined on the pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Security Context should support seccomp runtime/default [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] Security Context should support seccomp default which is unconfined [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] crictl should be able to run crictl on the node [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] SSH should SSH to all nodes and run commands [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] NoExecuteTaintManager Single Pod [Serial] evicts pods from tainted nodes [Skipped:SingleReplicaTopology] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": 
"blocking" + }, + { + "name": "[sig-node] NoExecuteTaintManager Single Pod [Serial] doesn't evict pod with tolerations from tainted nodes [Skipped:SingleReplicaTopology] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] NoExecuteTaintManager Single Pod [Serial] eventually evict pod with finite tolerations from tainted nodes [Skipped:SingleReplicaTopology] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] NoExecuteTaintManager Single Pod [Serial] removing taint cancels eviction [Disruptive] [Conformance] [Skipped:SingleReplicaTopology] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "Disruptive": {}, + "Serial": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] NoExecuteTaintManager Single Pod [Serial] pods evicted from tainted nodes have pod disruption condition [Skipped:SingleReplicaTopology] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] NoExecuteTaintManager Multiple Pods [Serial] only evicts pods without tolerations from tainted nodes [Skipped:SingleReplicaTopology] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-node] NoExecuteTaintManager Multiple Pods [Serial] evicts pods with minTolerationSeconds [Disruptive] [Conformance] [Skipped:SingleReplicaTopology] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "Disruptive": {}, + "Serial": {}, + "sig-node": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSIInlineVolumes should support CSIVolumeSource in Pod API [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSIInlineVolumes 
should run through the lifecycle of a CSIDriver [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] capacity provides storage capacity information [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Inline-volume (default fs)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Inline-volume (default fs)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (default fs)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (default fs)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + 
"resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Inline-volume (ext3)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Inline-volume (ext3)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (ext3)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (ext3)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ext3)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ext3)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Inline-volume (ext4)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: 
Inline-volume (ext4)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (ext4)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (ext4)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ext4)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ext4)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Inline-volume (xfs)] [Slow] volumes should store data [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Inline-volume (xfs)] [Slow] volumes should allow exec of files on the volume [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (xfs)] [Slow] volumes should store data [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + 
"source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (xfs)] [Slow] volumes should allow exec of files on the volume [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (xfs)] [Slow] volumes should store data [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (xfs)] [Slow] volumes should allow exec of files on the volume [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Inline-volume (ntfs)] [Feature:Windows] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Inline-volume (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (ntfs)] [Feature:Windows] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] 
[Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (block volmode)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (block volmode)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Inline-volume (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumeIO should write files of various sizes, verify size, 
validate content [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (block volmode)] volumeMode should fail to use a volume in a pod with 
mismatched mode [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Inline-volume (default fs)] subPath should support non-existent path [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Inline-volume (default fs)] subPath should support existing directory [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Inline-volume (default fs)] subPath should support existing single file [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Inline-volume (default fs)] subPath should support file as subpath [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": 
null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Inline-volume (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Inline-volume (default fs)] subPath should support creating multiple subpath from same volumes [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Inline-volume (default fs)] subPath should support restarting containers using directory as subpath [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Inline-volume (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Inline-volume (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Inline-volume (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + 
"tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Inline-volume (default fs)] subPath should support readOnly directory specified in the volumeMount [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Inline-volume (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Inline-volume (default fs)] subPath should support existing directories when readOnly specified in the volumeSource [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Inline-volume (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Inline-volume (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (default fs)] subPath should support non-existent path [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing directory [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + 
"resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing single file [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (default fs)] subPath should support file as subpath [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (default fs)] subPath should support creating multiple subpath from same volumes [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, 
+ "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (default fs)] subPath should support restarting containers using directory as subpath [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (default fs)] subPath should support readOnly directory specified in the volumeMount [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing directories when readOnly specified in the volumeSource [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + 
"sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] subPath should support non-existent path [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] subPath should support existing directory [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] subPath should support existing single file [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] subPath should support file as subpath [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + 
"memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] subPath should support creating multiple subpath from same volumes [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] subPath should support restarting containers using directory as subpath [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": 
"openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] subPath should support readOnly directory specified in the volumeMount [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] subPath should support existing directories when readOnly specified in the volumeSource [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support non-existent path [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + 
"duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing directory [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing single file [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support file as subpath [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + 
"sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support creating multiple subpath from same volumes [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support restarting containers using directory as subpath [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support readOnly directory specified in the volumeMount [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI 
Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support readOnly file specified in the volumeMount [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing directories when readOnly specified in the volumeSource [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with mount options [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", 
+ "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with any volume data source [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source (ROX mode) [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source in parallel [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] provisioning should mount multiple PV pointing to the same storage on the same node [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] 
[Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with mount options [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with any volume data source [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (block volmode)] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source (ROX mode) [Suite:openshift/conformance/parallel] 
[Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source in parallel [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (block volmode)] provisioning should mount multiple PV pointing to the same storage on the same node [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with mount options [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with any volume data source [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": 
null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source (ROX mode) [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source in parallel [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should mount multiple PV pointing to the same storage on the same node [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI 
Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV 
(filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + 
"timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes 
[Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the 
single volume from pods on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + 
"mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with different volume mode and retain data 
across pod recreation on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + 
"timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] 
[Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + 
"sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSourceXFS": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (block volmode)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + 
"mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand Verify if offline PVC expansion works [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand Verify if offline PVC expansion works [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)(allowExpansion)] [Feature:Windows] volume-expand Verify if offline PVC expansion works [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)(allowExpansion)] [Feature:Windows] volume-expand should resize volume when PVC is edited while pod is using it 
[Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart. [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + 
"timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart. [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after 
restart. [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart. [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns. 
[Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (filesystem volmode)] volumeLimits should support volume limits [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (filesystem volmode)] volumeLimits should verify that all csinodes have volume limits [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Generic Ephemeral-volume (default fs)] volumeLimits should support volume limits [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Generic Ephemeral-volume (default fs)] volumeLimits should verify that all csinodes have volume limits [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (immediate binding)] topology should provision a volume and schedule a pod with AllowedTopologies [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (immediate binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (delayed binding)] topology 
should provision a volume and schedule a pod with AllowedTopologies [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (delayed binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (block volmode)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with same fsgroup applied to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with different fsgroup applied to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} 
+ }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with same fsgroup skips ownership changes to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with different fsgroup applied to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should create read-only inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should create read/write inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": 
"", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support two pods which have the same volume definition [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support multiple inline ephemeral volumes [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should create read-only inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should create read/write inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support two pods which have the same volume definition [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support multiple inline ephemeral volumes [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": 
null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should create read-only inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should create read/write inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support two pods which have the same volume definition [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support multiple inline ephemeral volumes [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: CSI Ephemeral-volume (default fs)] ephemeral should create read-only inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: CSI Ephemeral-volume (default fs)] ephemeral should create 
read/write inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: CSI Ephemeral-volume (default fs)] ephemeral should support expansion of pvcs created for ephemeral pvcs [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: CSI Ephemeral-volume (default fs)] ephemeral should support two pods which have the same volume definition [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: CSI Ephemeral-volume (default fs)] ephemeral should support multiple inline ephemeral volumes [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic Snapshot (delete policy)] snapshottable [Feature:VolumeSnapshotDataSource] volume snapshot controller should check snapshot fields, check restore correctly works, check deletion (ephemeral) [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic Snapshot (delete policy)] snapshottable [Feature:VolumeSnapshotDataSource] volume snapshot controller should check snapshot fields, check restore correctly works after modifying source data, check deletion (persistent) [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic Snapshot (retain policy)] snapshottable [Feature:VolumeSnapshotDataSource] volume snapshot controller should check snapshot fields, check restore correctly works, check 
deletion (ephemeral) [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic Snapshot (retain policy)] snapshottable [Feature:VolumeSnapshotDataSource] volume snapshot controller should check snapshot fields, check restore correctly works after modifying source data, check deletion (persistent) [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Ephemeral Snapshot (delete policy)] snapshottable [Feature:VolumeSnapshotDataSource] volume snapshot controller should check snapshot fields, check restore correctly works, check deletion (ephemeral) [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Ephemeral Snapshot (delete policy)] snapshottable [Feature:VolumeSnapshotDataSource] volume snapshot controller should check snapshot fields, check restore correctly works after modifying source data, check deletion (persistent) [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Ephemeral Snapshot (retain policy)] snapshottable [Feature:VolumeSnapshotDataSource] volume snapshot controller should check snapshot fields, check restore correctly works, check deletion (ephemeral) [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Ephemeral Snapshot (retain policy)] snapshottable [Feature:VolumeSnapshotDataSource] volume snapshot controller should check snapshot fields, check restore correctly works after modifying source data, check deletion (persistent) 
[Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned Snapshot (delete policy)] snapshottable [Feature:VolumeSnapshotDataSource] volume snapshot controller should check snapshot fields, check restore correctly works, check deletion (ephemeral) [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned Snapshot (delete policy)] snapshottable [Feature:VolumeSnapshotDataSource] volume snapshot controller should check snapshot fields, check restore correctly works after modifying source data, check deletion (persistent) [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned Snapshot (retain policy)] snapshottable [Feature:VolumeSnapshotDataSource] volume snapshot controller should check snapshot fields, check restore correctly works, check deletion (ephemeral) [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Pre-provisioned Snapshot (retain policy)] snapshottable [Feature:VolumeSnapshotDataSource] volume snapshot controller should check snapshot fields, check restore correctly works after modifying source data, check deletion (persistent) [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic Snapshot (delete policy)] snapshottable-stress [Feature:VolumeSnapshotDataSource] should support snapshotting of many volumes repeatedly [Slow] [Serial] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "otherNames": null, + "labels": { + 
"Feature:VolumeSnapshotDataSource": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic Snapshot (retain policy)] snapshottable-stress [Feature:VolumeSnapshotDataSource] should support snapshotting of many volumes repeatedly [Slow] [Serial] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (filesystem volmode)] volume-lifecycle-performance should provision volumes at scale within performance constraints [Slow] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] read-write-once-pod [MinimumKubeletVersion:1.27] should preempt lower priority pods using ReadWriteOncePod volumes [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "MinimumKubeletVersion:1.27": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] read-write-once-pod [MinimumKubeletVersion:1.27] should block a second pod from using an in-use ReadWriteOncePod volume on the same node [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "MinimumKubeletVersion:1.27": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] volume-modify [Feature:VolumeAttributesClass] [FeatureGate:VolumeAttributesClass] [Beta] should create a volume with VAC [Disabled:Alpha] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Beta": {}, + "Feature:VolumeAttributesClass": {}, + "FeatureGate:VolumeAttributesClass": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] volume-modify [Feature:VolumeAttributesClass] [FeatureGate:VolumeAttributesClass] [Beta] should modify volume with no VAC 
[Disabled:Alpha] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Beta": {}, + "Feature:VolumeAttributesClass": {}, + "FeatureGate:VolumeAttributesClass": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] volume-modify [Feature:VolumeAttributesClass] [FeatureGate:VolumeAttributesClass] [Beta] should modify volume that already has a VAC [Disabled:Alpha] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Beta": {}, + "Feature:VolumeAttributesClass": {}, + "FeatureGate:VolumeAttributesClass": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (block volmode)] volume-modify [Feature:VolumeAttributesClass] [FeatureGate:VolumeAttributesClass] [Beta] should create a volume with VAC [Disabled:Alpha] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Beta": {}, + "Feature:VolumeAttributesClass": {}, + "FeatureGate:VolumeAttributesClass": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (block volmode)] volume-modify [Feature:VolumeAttributesClass] [FeatureGate:VolumeAttributesClass] [Beta] should modify volume with no VAC [Disabled:Alpha] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Beta": {}, + "Feature:VolumeAttributesClass": {}, + "FeatureGate:VolumeAttributesClass": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (block volmode)] volume-modify [Feature:VolumeAttributesClass] [FeatureGate:VolumeAttributesClass] [Beta] should modify volume that already has a VAC [Disabled:Alpha] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Beta": {}, + "Feature:VolumeAttributesClass": {}, + "FeatureGate:VolumeAttributesClass": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volume-modify [Feature:VolumeAttributesClass] [FeatureGate:VolumeAttributesClass] [Beta] should create a volume with VAC [Disabled:Alpha] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Beta": {}, + "Feature:VolumeAttributesClass": {}, + "Feature:Windows": {}, + "FeatureGate:VolumeAttributesClass": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": 
"", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volume-modify [Feature:VolumeAttributesClass] [FeatureGate:VolumeAttributesClass] [Beta] should modify volume with no VAC [Disabled:Alpha] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Beta": {}, + "Feature:VolumeAttributesClass": {}, + "Feature:Windows": {}, + "FeatureGate:VolumeAttributesClass": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volume-modify [Feature:VolumeAttributesClass] [FeatureGate:VolumeAttributesClass] [Beta] should modify volume that already has a VAC [Disabled:Alpha] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Beta": {}, + "Feature:VolumeAttributesClass": {}, + "Feature:Windows": {}, + "FeatureGate:VolumeAttributesClass": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (default fs)] capacity provides storage capacity information [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Inline-volume (default fs)] volumes should store data [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Inline-volume (default fs)] volumes should allow exec of files on the volume [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (default fs)] volumes should store data [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" 
+ }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (default fs)] volumes should allow exec of files on the volume [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (default fs)] volumes should store data [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (default fs)] volumes should allow exec of files on the volume [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Inline-volume (ext3)] volumes should store data [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Inline-volume (ext3)] volumes should allow exec of files on the volume [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (ext3)] volumes should store data [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (ext3)] volumes should allow exec of files on the volume [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + 
"timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ext3)] volumes should store data [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ext3)] volumes should allow exec of files on the volume [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Inline-volume (ext4)] volumes should store data [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Inline-volume (ext4)] volumes should allow exec of files on the volume [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (ext4)] volumes should store data [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (ext4)] volumes should allow exec of files on the volume [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ext4)] volumes should store data [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, 
+ "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ext4)] volumes should allow exec of files on the volume [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Inline-volume (xfs)] [Slow] volumes should store data [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Inline-volume (xfs)] [Slow] volumes should allow exec of files on the volume [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (xfs)] [Slow] volumes should store data [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (xfs)] [Slow] volumes should allow exec of files on the volume [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (xfs)] [Slow] volumes should store data [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (xfs)] [Slow] volumes should allow exec of files on the volume [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + 
"timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Inline-volume (ntfs)] [Feature:Windows] volumes should store data [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Inline-volume (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (ntfs)] [Feature:Windows] volumes should store data [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumes should store data [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned 
PV (block volmode)] volumes should store data [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (block volmode)] volumes should store data [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Inline-volume (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow] [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow] [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow] [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumeIO should write files of various sizes, verify size, validate content [Slow] [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow] [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": 
"", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow] [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow] [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow] [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (block volmode)] 
volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Inline-volume (default fs)] subPath should support non-existent path [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Inline-volume (default fs)] subPath should support existing directory [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Inline-volume (default fs)] subPath should support existing single file [LinuxOnly] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Inline-volume (default fs)] subPath should support file as subpath [LinuxOnly] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly] [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly] [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + 
}, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Inline-volume (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly] [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly] [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Inline-volume (default fs)] subPath should support creating multiple subpath from same volumes [Slow] [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Inline-volume (default fs)] subPath should support restarting containers using directory as subpath [Slow] [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Inline-volume (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly] [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Inline-volume (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Inline-volume (default fs)] subPath should unmount if pod is force deleted while 
kubelet is down [Disruptive] [Slow] [LinuxOnly] [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Inline-volume (default fs)] subPath should support readOnly directory specified in the volumeMount [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Inline-volume (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Inline-volume (default fs)] subPath should support existing directories when readOnly specified in the volumeSource [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Inline-volume (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow] [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Inline-volume (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should support non-existent path [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + 
"isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing directory [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing single file [LinuxOnly] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should support file as subpath [LinuxOnly] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly] [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly] [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly] [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV 
(default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly] [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should support creating multiple subpath from same volumes [Slow] [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should support restarting containers using directory as subpath [Slow] [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly] [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should support readOnly directory specified in the volumeMount [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + 
"Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing directories when readOnly specified in the volumeSource [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow] [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (default fs)] subPath should support non-existent path [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (default fs)] subPath should support existing directory [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" 
+ }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (default fs)] subPath should support existing single file [LinuxOnly] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (default fs)] subPath should support file as subpath [LinuxOnly] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly] [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly] [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly] [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly] [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (default fs)] subPath should support creating multiple subpath from same volumes [Slow] [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + 
"tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (default fs)] subPath should support restarting containers using directory as subpath [Slow] [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly] [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (default fs)] subPath should support readOnly directory specified in the volumeMount [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": 
"blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (default fs)] subPath should support existing directories when readOnly specified in the volumeSource [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow] [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support non-existent path [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing directory [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing single file [LinuxOnly] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support file as subpath 
[LinuxOnly] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly] [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly] [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly] [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly] [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support creating multiple subpath from same volumes [Slow] [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support restarting containers using directory as subpath [Slow] [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + 
"Feature:Windows": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly] [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Feature:Windows": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Feature:Windows": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support readOnly directory specified in the volumeMount [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support readOnly file specified in the volumeMount [LinuxOnly] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing directories when readOnly specified in the volumeSource [Skipped:gce] 
[Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should verify container cannot write to subpath readonly volumes [Slow] [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with mount options [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (default fs)] provisioning should provision 
storage with any volume data source [Serial] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (default fs)] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source (ROX mode) [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source in parallel [Slow] [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (default fs)] provisioning should mount multiple PV pointing to the same storage on the same node [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with mount options [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + 
"labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with any volume data source [Serial] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (block volmode)] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with 
pvc data source (ROX mode) [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source in parallel [Slow] [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (block volmode)] provisioning should mount multiple PV pointing to the same storage on the same node [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with mount options [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:Windows": {}, + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:Windows": {}, + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: 
Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with any volume data source [Serial] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:Windows": {}, + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source (ROX mode) [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source in parallel [Slow] [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should mount multiple PV pointing to the same storage on the same node [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + 
}, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { 
+ "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSourceXFS": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + 
"resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSourceXFS": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Skipped:gce] [Suite:k8s]", + 
"otherNames": null, + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] 
[Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSourceXFS": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (block volmode)] multiVolume 
[Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSourceXFS": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": 
"[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should 
concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSourceXFS": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: 
pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSourceXFS": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": 
"openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single volume from pods on different node [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": 
null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Feature:Windows": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSourceXFS": {}, + "Feature:Windows": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single volume from pods on different node [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (default fs)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (block volmode)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property 
[Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand Verify if offline PVC expansion works [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand Verify if offline PVC expansion works [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ntfs)(allowExpansion)] [Feature:Windows] volume-expand Verify if offline PVC expansion works [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Serial": {}, + 
"sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ntfs)(allowExpansion)] [Feature:Windows] volume-expand should resize volume when PVC is edited while pod is using it [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart. [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux] [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux] [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux] [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": 
"openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux] [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart. [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux] [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux] [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux] [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes 
[Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux] [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart. [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns. [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns. [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart. [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns. 
[Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns. [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] volumeLimits should support volume limits [Serial] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] volumeLimits should verify that all csinodes have volume limits [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Generic Ephemeral-volume (default fs)] volumeLimits should support volume limits [Serial] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Generic Ephemeral-volume (default fs)] volumeLimits should verify that all csinodes have volume limits [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (immediate binding)] topology should provision a volume and schedule a pod with AllowedTopologies [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": 
null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (immediate binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (delayed binding)] topology should provision a volume and schedule a pod with AllowedTopologies [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (delayed binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (default fs)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial] [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (block volmode)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial] [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + 
"source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with same fsgroup applied to the volume contents [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with different fsgroup applied to the volume contents [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with same fsgroup skips ownership changes to the volume contents [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with different fsgroup applied to the volume contents [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Generic Ephemeral-volume (default fs) 
(late-binding)] ephemeral should create read-only inline ephemeral volume [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should create read/write inline ephemeral volume [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support two pods which have the same volume definition [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support multiple inline ephemeral volumes [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should create read-only inline ephemeral volume [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should create read/write inline ephemeral volume [Skipped:gce] 
[Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support two pods which have the same volume definition [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support multiple inline ephemeral volumes [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should create read-only inline ephemeral volume [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should create read/write inline ephemeral volume [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + 
"Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support two pods which have the same volume definition [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support multiple inline ephemeral volumes [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: CSI Ephemeral-volume (default fs)] ephemeral should create read-only inline ephemeral volume [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: CSI Ephemeral-volume (default fs)] ephemeral should create read/write inline ephemeral volume [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: CSI Ephemeral-volume (default fs)] ephemeral should support expansion of pvcs created for ephemeral pvcs [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: CSI Ephemeral-volume (default fs)] ephemeral should support two pods which have the same volume definition [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + 
}, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: CSI Ephemeral-volume (default fs)] ephemeral should support multiple inline ephemeral volumes [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic Snapshot (delete policy)] snapshottable [Feature:VolumeSnapshotDataSource] volume snapshot controller should check snapshot fields, check restore correctly works, check deletion (ephemeral) [Skipped:NoOptionalCapabilities] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic Snapshot (delete policy)] snapshottable [Feature:VolumeSnapshotDataSource] volume snapshot controller should check snapshot fields, check restore correctly works after modifying source data, check deletion (persistent) [Skipped:NoOptionalCapabilities] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic Snapshot (retain policy)] snapshottable [Feature:VolumeSnapshotDataSource] volume snapshot controller should check snapshot fields, check restore correctly works, check deletion (ephemeral) [Skipped:NoOptionalCapabilities] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic Snapshot (retain policy)] snapshottable [Feature:VolumeSnapshotDataSource] volume snapshot controller should check snapshot fields, check restore correctly works after modifying source data, check deletion (persistent) [Skipped:NoOptionalCapabilities] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": 
"openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Ephemeral Snapshot (delete policy)] snapshottable [Feature:VolumeSnapshotDataSource] volume snapshot controller should check snapshot fields, check restore correctly works, check deletion (ephemeral) [Skipped:NoOptionalCapabilities] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Ephemeral Snapshot (delete policy)] snapshottable [Feature:VolumeSnapshotDataSource] volume snapshot controller should check snapshot fields, check restore correctly works after modifying source data, check deletion (persistent) [Skipped:NoOptionalCapabilities] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Ephemeral Snapshot (retain policy)] snapshottable [Feature:VolumeSnapshotDataSource] volume snapshot controller should check snapshot fields, check restore correctly works, check deletion (ephemeral) [Skipped:NoOptionalCapabilities] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Ephemeral Snapshot (retain policy)] snapshottable [Feature:VolumeSnapshotDataSource] volume snapshot controller should check snapshot fields, check restore correctly works after modifying source data, check deletion (persistent) [Skipped:NoOptionalCapabilities] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned Snapshot (delete policy)] snapshottable [Feature:VolumeSnapshotDataSource] volume snapshot controller should check snapshot fields, check restore correctly works, check deletion (ephemeral) [Skipped:NoOptionalCapabilities] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Serial": {}, + "sig-storage": {} 
+ }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned Snapshot (delete policy)] snapshottable [Feature:VolumeSnapshotDataSource] volume snapshot controller should check snapshot fields, check restore correctly works after modifying source data, check deletion (persistent) [Skipped:NoOptionalCapabilities] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned Snapshot (retain policy)] snapshottable [Feature:VolumeSnapshotDataSource] volume snapshot controller should check snapshot fields, check restore correctly works, check deletion (ephemeral) [Skipped:NoOptionalCapabilities] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Pre-provisioned Snapshot (retain policy)] snapshottable [Feature:VolumeSnapshotDataSource] volume snapshot controller should check snapshot fields, check restore correctly works after modifying source data, check deletion (persistent) [Skipped:NoOptionalCapabilities] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic Snapshot (delete policy)] snapshottable-stress [Feature:VolumeSnapshotDataSource] should support snapshotting of many volumes repeatedly [Slow] [Serial] [Skipped:NoOptionalCapabilities] [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic Snapshot (retain policy)] snapshottable-stress [Feature:VolumeSnapshotDataSource] should support snapshotting of many volumes repeatedly [Slow] [Serial] [Skipped:NoOptionalCapabilities] [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + 
"Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] volume-lifecycle-performance should provision volumes at scale within performance constraints [Slow] [Serial] [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (default fs)] read-write-once-pod [MinimumKubeletVersion:1.27] should preempt lower priority pods using ReadWriteOncePod volumes [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "MinimumKubeletVersion:1.27": {}, + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (default fs)] read-write-once-pod [MinimumKubeletVersion:1.27] should block a second pod from using an in-use ReadWriteOncePod volume on the same node [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "MinimumKubeletVersion:1.27": {}, + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (default fs)] volume-modify [Feature:VolumeAttributesClass] [FeatureGate:VolumeAttributesClass] [Beta] should create a volume with VAC [Disabled:Alpha] [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Beta": {}, + "Feature:VolumeAttributesClass": {}, + "FeatureGate:VolumeAttributesClass": {}, + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (default fs)] volume-modify [Feature:VolumeAttributesClass] [FeatureGate:VolumeAttributesClass] [Beta] should modify volume with no VAC [Disabled:Alpha] [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Beta": {}, + "Feature:VolumeAttributesClass": {}, + "FeatureGate:VolumeAttributesClass": {}, + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: 
pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (default fs)] volume-modify [Feature:VolumeAttributesClass] [FeatureGate:VolumeAttributesClass] [Beta] should modify volume that already has a VAC [Disabled:Alpha] [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Beta": {}, + "Feature:VolumeAttributesClass": {}, + "FeatureGate:VolumeAttributesClass": {}, + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (block volmode)] volume-modify [Feature:VolumeAttributesClass] [FeatureGate:VolumeAttributesClass] [Beta] should create a volume with VAC [Disabled:Alpha] [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Beta": {}, + "Feature:VolumeAttributesClass": {}, + "FeatureGate:VolumeAttributesClass": {}, + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (block volmode)] volume-modify [Feature:VolumeAttributesClass] [FeatureGate:VolumeAttributesClass] [Beta] should modify volume with no VAC [Disabled:Alpha] [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Beta": {}, + "Feature:VolumeAttributesClass": {}, + "FeatureGate:VolumeAttributesClass": {}, + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (block volmode)] volume-modify [Feature:VolumeAttributesClass] [FeatureGate:VolumeAttributesClass] [Beta] should modify volume that already has a VAC [Disabled:Alpha] [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Beta": {}, + "Feature:VolumeAttributesClass": {}, + "FeatureGate:VolumeAttributesClass": {}, + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volume-modify [Feature:VolumeAttributesClass] [FeatureGate:VolumeAttributesClass] [Beta] should create a volume with VAC [Disabled:Alpha] [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Beta": {}, + "Feature:VolumeAttributesClass": {}, + "Feature:Windows": {}, + "FeatureGate:VolumeAttributesClass": {}, + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV 
(ntfs)] [Feature:Windows] volume-modify [Feature:VolumeAttributesClass] [FeatureGate:VolumeAttributesClass] [Beta] should modify volume with no VAC [Disabled:Alpha] [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Beta": {}, + "Feature:VolumeAttributesClass": {}, + "Feature:Windows": {}, + "FeatureGate:VolumeAttributesClass": {}, + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Volumes [Driver: pd.csi.storage.gke.io] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volume-modify [Feature:VolumeAttributesClass] [FeatureGate:VolumeAttributesClass] [Beta] should modify volume that already has a VAC [Disabled:Alpha] [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Beta": {}, + "Feature:VolumeAttributesClass": {}, + "Feature:Windows": {}, + "FeatureGate:VolumeAttributesClass": {}, + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSIStorageCapacity should support CSIStorageCapacities API operations [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] EmptyDir wrapper volumes should not conflict [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] EmptyDir wrapper volumes should not cause race condition when used for configmaps [Serial] [Conformance] [Suite:openshift/conformance/serial/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] EmptyDir wrapper volumes should not cause race condition when used for git_repo [Serial] [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] Ephemeralstorage When pod refers to non-existent ephemeral storage should allow deletion of pod with invalid volume : secret [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": 
"", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] Ephemeralstorage When pod refers to non-existent ephemeral storage should allow deletion of pod with invalid volume : configmap [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] Ephemeralstorage When pod refers to non-existent ephemeral storage should allow deletion of pod with invalid volume : projected [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] [Feature:Flexvolumes] Detaching volumes should not work when mount is in progress [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Flexvolumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] Flexvolumes should be mountable when non-attachable [Skipped:gce] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] Flexvolumes should be mountable when attachable [Feature:Flexvolumes] [Skipped:gce] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Flexvolumes": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] [Feature:Flexvolumes] Mounted flexvolume expand [Slow] Should verify mounted flex volumes can be resized [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Flexvolumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] [Feature:Flexvolumes] Mounted flexvolume volume expand [Slow] should be resizable when mounted [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Flexvolumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] GenericPersistentVolume [Disruptive] When kubelet restarts Should test that a file written to the mount before 
kubelet restart is readable after restart. [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] GenericPersistentVolume [Disruptive] When kubelet restarts Should test that a volume mounted to a pod that is deleted while the kubelet is down unmounts when the kubelet returns. [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] GenericPersistentVolume [Disruptive] When kubelet restarts Should test that a volume mounted to a pod that is force deleted while the kubelet is down unmounts when the kubelet returns. [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] HostPathType Directory [Slow] Should fail on mounting non-existent directory 'does-not-exist-dir' when HostPathType is HostPathDirectory [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] HostPathType Directory [Slow] Should be able to mount directory 'adir' successfully when HostPathType is HostPathDirectory [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] HostPathType Directory [Slow] Should be able to mount directory 'adir' successfully when HostPathType is HostPathUnset [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] HostPathType Directory [Slow] Should fail on mounting directory 'adir' when HostPathType is HostPathFile [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] HostPathType Directory [Slow] Should fail on mounting directory 'adir' when HostPathType is HostPathSocket [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": 
null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] HostPathType Directory [Slow] Should fail on mounting directory 'adir' when HostPathType is HostPathCharDev [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] HostPathType Directory [Slow] Should fail on mounting directory 'adir' when HostPathType is HostPathBlockDev [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] HostPathType File [Slow] Should fail on mounting non-existent file 'does-not-exist-file' when HostPathType is HostPathFile [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] HostPathType File [Slow] Should be able to mount file 'afile' successfully when HostPathType is HostPathFile [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] HostPathType File [Slow] Should be able to mount file 'afile' successfully when HostPathType is HostPathUnset [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] HostPathType File [Slow] Should fail on mounting file 'afile' when HostPathType is HostPathDirectory [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] HostPathType File [Slow] Should fail on mounting file 'afile' when HostPathType is HostPathSocket [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] HostPathType File [Slow] Should fail on mounting file 'afile' when HostPathType is HostPathCharDev [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + 
"memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] HostPathType File [Slow] Should fail on mounting file 'afile' when HostPathType is HostPathBlockDev [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] HostPathType Socket [Slow] Should fail on mounting non-existent socket 'does-not-exist-socket' when HostPathType is HostPathSocket [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] HostPathType Socket [Slow] Should be able to mount socket 'asocket' successfully when HostPathType is HostPathSocket [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] HostPathType Socket [Slow] Should be able to mount socket 'asocket' successfully when HostPathType is HostPathUnset [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] HostPathType Socket [Slow] Should fail on mounting socket 'asocket' when HostPathType is HostPathDirectory [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] HostPathType Socket [Slow] Should fail on mounting socket 'asocket' when HostPathType is HostPathFile [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] HostPathType Socket [Slow] Should fail on mounting socket 'asocket' when HostPathType is HostPathCharDev [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] HostPathType Socket [Slow] Should fail on mounting socket 'asocket' when HostPathType is HostPathBlockDev [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": 
null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] HostPathType Character Device [Slow] Should fail on mounting non-existent character device 'does-not-exist-char-dev' when HostPathType is HostPathCharDev [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] HostPathType Character Device [Slow] Should be able to mount character device 'achardev' successfully when HostPathType is HostPathCharDev [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] HostPathType Character Device [Slow] Should be able to mount character device 'achardev' successfully when HostPathType is HostPathUnset [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] HostPathType Character Device [Slow] Should fail on mounting character device 'achardev' when HostPathType is HostPathDirectory [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] HostPathType Character Device [Slow] Should fail on mounting character device 'achardev' when HostPathType is HostPathFile [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] HostPathType Character Device [Slow] Should fail on mounting character device 'achardev' when HostPathType is HostPathSocket [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] HostPathType Character Device [Slow] Should fail on mounting character device 'achardev' when HostPathType is HostPathBlockDev [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] HostPathType Block Device [Slow] Should fail on mounting non-existent block device 'does-not-exist-blk-dev' 
when HostPathType is HostPathBlockDev [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] HostPathType Block Device [Slow] Should be able to mount block device 'ablkdev' successfully when HostPathType is HostPathBlockDev [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] HostPathType Block Device [Slow] Should be able to mount block device 'ablkdev' successfully when HostPathType is HostPathUnset [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] HostPathType Block Device [Slow] Should fail on mounting block device 'ablkdev' when HostPathType is HostPathDirectory [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] HostPathType Block Device [Slow] Should fail on mounting block device 'ablkdev' when HostPathType is HostPathFile [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] HostPathType Block Device [Slow] Should fail on mounting block device 'ablkdev' when HostPathType is HostPathSocket [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] HostPathType Block Device [Slow] Should fail on mounting block device 'ablkdev' when HostPathType is HostPathCharDev [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)] capacity provides storage capacity information [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, 
+ { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Inline-volume (default fs)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Inline-volume (default fs)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (default fs)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (default fs)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Inline-volume (ext3)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Inline-volume (ext3)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + 
"resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (ext3)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (ext3)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ext3)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ext3)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Inline-volume (ext4)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Inline-volume (ext4)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (ext4)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (ext4)] volumes 
should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ext4)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ext4)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Inline-volume (xfs)] [Slow] volumes should store data [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Inline-volume (xfs)] [Slow] volumes should allow exec of files on the volume [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (xfs)] [Slow] volumes should store data [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (xfs)] [Slow] volumes should allow exec of files on the volume [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (xfs)] [Slow] volumes should store data [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree 
Volumes [Driver: nfs] [Testpattern: Dynamic PV (xfs)] [Slow] volumes should allow exec of files on the volume [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Inline-volume (ntfs)] [Feature:Windows] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Inline-volume (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (ntfs)] [Feature:Windows] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV 
(block volmode)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (block volmode)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Inline-volume (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumeIO should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] 
[Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should fail to create pod by failing to mount volume [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (block volmode)] volumeMode should fail in binding dynamic provisioned PV to PVC [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + 
"resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Inline-volume (default fs)] subPath should support non-existent path [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Inline-volume (default fs)] subPath should support existing directory [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Inline-volume (default fs)] subPath should support existing single file [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Inline-volume (default fs)] subPath should support file as subpath [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": 
"openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Inline-volume (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Inline-volume (default fs)] subPath should support creating multiple subpath from same volumes [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Inline-volume (default fs)] subPath should support restarting containers using directory as subpath [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Inline-volume (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Inline-volume (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Inline-volume (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": 
"openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Inline-volume (default fs)] subPath should support readOnly directory specified in the volumeMount [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Inline-volume (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Inline-volume (default fs)] subPath should support existing directories when readOnly specified in the volumeSource [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Inline-volume (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Inline-volume (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should support non-existent path [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing directory [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + 
}, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing single file [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should support file as subpath [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should support creating multiple subpath from same volumes [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (default 
fs)] subPath should support restarting containers using directory as subpath [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should support readOnly directory specified in the volumeMount [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing directories when readOnly specified in the volumeSource [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] 
In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)] subPath should support non-existent path [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)] subPath should support existing directory [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)] subPath should support existing single file [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)] subPath should support file as subpath [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly] 
[Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)] subPath should support creating multiple subpath from same volumes [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)] subPath should support restarting containers using directory as subpath [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": 
{}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)] subPath should support readOnly directory specified in the volumeMount [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)] subPath should support existing directories when readOnly specified in the volumeSource [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support non-existent path [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing directory [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} 
+ }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing single file [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support file as subpath [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support creating multiple subpath from same volumes [Slow] 
[Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support restarting containers using directory as subpath [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support readOnly directory specified in the volumeMount [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support readOnly file specified in the volumeMount [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": 
"openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing directories when readOnly specified in the volumeSource [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with mount options [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with any volume data source 
[Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source (ROX mode) [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source in parallel [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)] provisioning should mount multiple PV pointing to the same storage on the same node [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with mount options [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (block 
volmode)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with any volume data source [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (block volmode)] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source (ROX mode) [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source in parallel [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + 
"tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (block volmode)] provisioning should mount multiple PV pointing to the same storage on the same node [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with mount options [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with any volume data source [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:Windows": {}, + "sig-storage": {} + 
}, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source (ROX mode) [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source in parallel [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should mount multiple PV pointing to the same storage on the same node [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (filesystem 
volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + 
"otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + 
"duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod 
recreation on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + 
}, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree 
Volumes [Driver: nfs] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": 
"blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node 
[Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": 
"openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": 
"openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSourceXFS": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (block volmode)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand Verify if offline PVC expansion works [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + 
"labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand Verify if offline PVC expansion works [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ntfs)(allowExpansion)] [Feature:Windows] volume-expand Verify if offline PVC expansion works [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (ntfs)(allowExpansion)] [Feature:Windows] volume-expand should resize volume when PVC is edited while pod is using it [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart. 
[Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart. 
[Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart. 
[Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart. [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns. 
[Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (filesystem volmode)] volumeLimits should support volume limits [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (filesystem volmode)] volumeLimits should verify that all csinodes have volume limits [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Generic Ephemeral-volume (default fs)] volumeLimits should support volume limits [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Generic Ephemeral-volume (default fs)] volumeLimits should verify that all csinodes have volume limits [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (immediate binding)] topology should provision a volume and schedule a pod with AllowedTopologies [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (immediate binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (delayed binding)] topology should provision a volume and schedule 
a pod with AllowedTopologies [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (delayed binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (block volmode)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with same fsgroup applied to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with different fsgroup applied to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", 
+ "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with same fsgroup skips ownership changes to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with different fsgroup applied to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should create read-only inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should create read/write inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + 
{ + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support two pods which have the same volume definition [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support multiple inline ephemeral volumes [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should create read-only inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should create read/write inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support two pods which have the same volume definition [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support multiple inline ephemeral volumes [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + 
"memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should create read-only inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should create read/write inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support two pods which have the same volume definition [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: nfs] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support multiple inline ephemeral volumes [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (default fs)] capacity provides storage capacity information [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Inline-volume (default fs)] volumes should store data [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + 
"isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Inline-volume (default fs)] volumes should allow exec of files on the volume [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (default fs)] volumes should store data [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (default fs)] volumes should allow exec of files on the volume [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (default fs)] volumes should store data [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (default fs)] volumes should allow exec of files on the volume [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Inline-volume (ext3)] volumes should store data [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Inline-volume (ext3)] volumes should allow exec of files on the volume [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + 
"timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (ext3)] volumes should store data [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (ext3)] volumes should allow exec of files on the volume [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ext3)] volumes should store data [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ext3)] volumes should allow exec of files on the volume [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Inline-volume (ext4)] volumes should store data [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Inline-volume (ext4)] volumes should allow exec of files on the volume [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (ext4)] volumes should store data [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes 
[Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (ext4)] volumes should allow exec of files on the volume [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ext4)] volumes should store data [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ext4)] volumes should allow exec of files on the volume [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Inline-volume (xfs)] [Slow] volumes should store data [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Inline-volume (xfs)] [Slow] volumes should allow exec of files on the volume [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (xfs)] [Slow] volumes should store data [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (xfs)] [Slow] volumes should allow exec of files on the volume [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] 
[Testpattern: Dynamic PV (xfs)] [Slow] volumes should store data [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (xfs)] [Slow] volumes should allow exec of files on the volume [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Inline-volume (ntfs)] [Feature:Windows] volumes should store data [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Volumes": {}, + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Inline-volume (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Volumes": {}, + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (ntfs)] [Feature:Windows] volumes should store data [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Volumes": {}, + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Volumes": {}, + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumes should store data [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Volumes": {}, + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + 
"lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Volumes": {}, + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (block volmode)] volumes should store data [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (block volmode)] volumes should store data [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Inline-volume (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow] [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow] [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow] [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumeIO should write files of various sizes, verify size, validate content [Slow] [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Volumes": {}, + "Feature:Windows": {}, + "Slow": {}, + 
"sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow] [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow] [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow] [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes 
[Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow] [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Inline-volume (default fs)] subPath should support non-existent path [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Inline-volume (default fs)] subPath should support existing directory [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Inline-volume (default fs)] subPath should support existing single file [LinuxOnly] [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Inline-volume (default fs)] subPath should support file as subpath [LinuxOnly] [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly] [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": 
"openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly] [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Inline-volume (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly] [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly] [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Inline-volume (default fs)] subPath should support creating multiple subpath from same volumes [Slow] [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Inline-volume (default fs)] subPath should support restarting containers using directory as subpath [Slow] [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Inline-volume (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly] [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Inline-volume (default fs)] subPath should unmount if pod is gracefully deleted while 
kubelet is down [Disruptive] [Slow] [LinuxOnly] [Disabled:Broken] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Inline-volume (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Disabled:Broken] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Inline-volume (default fs)] subPath should support readOnly directory specified in the volumeMount [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Inline-volume (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly] [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Inline-volume (default fs)] subPath should support existing directories when readOnly specified in the volumeSource [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Inline-volume (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow] [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Inline-volume (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { 
+ "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (default fs)] subPath should support non-existent path [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing directory [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing single file [LinuxOnly] [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (default fs)] subPath should support file as subpath [LinuxOnly] [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly] [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly] [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly] [Disabled:Broken] [Suite:k8s]", + 
"otherNames": null, + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly] [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (default fs)] subPath should support creating multiple subpath from same volumes [Slow] [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (default fs)] subPath should support restarting containers using directory as subpath [Slow] [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly] [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Disabled:Broken] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Disabled:Broken] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": 
null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (default fs)] subPath should support readOnly directory specified in the volumeMount [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly] [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing directories when readOnly specified in the volumeSource [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow] [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (default fs)] subPath should support non-existent path [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (default fs)] subPath should 
support existing directory [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (default fs)] subPath should support existing single file [LinuxOnly] [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (default fs)] subPath should support file as subpath [LinuxOnly] [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly] [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly] [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly] [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly] [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + 
"lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (default fs)] subPath should support creating multiple subpath from same volumes [Slow] [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (default fs)] subPath should support restarting containers using directory as subpath [Slow] [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly] [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Disabled:Broken] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Disabled:Broken] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (default fs)] subPath should support readOnly directory specified in the volumeMount [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (default fs)] subPath should support readOnly file 
specified in the volumeMount [LinuxOnly] [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (default fs)] subPath should support existing directories when readOnly specified in the volumeSource [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow] [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support non-existent path [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Volumes": {}, + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing directory [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Volumes": {}, + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing single file [LinuxOnly] [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Volumes": {}, + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": 
"openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support file as subpath [LinuxOnly] [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Volumes": {}, + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly] [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Volumes": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly] [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Volumes": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly] [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Volumes": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly] [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Volumes": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support creating multiple subpath from same volumes [Slow] [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Volumes": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": 
"[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support restarting containers using directory as subpath [Slow] [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Volumes": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly] [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Volumes": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Disabled:Broken] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Feature:Volumes": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Disabled:Broken] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Feature:Volumes": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support readOnly directory specified in the volumeMount [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Volumes": {}, + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support readOnly file specified in the volumeMount [LinuxOnly] [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Volumes": {}, + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": 
"blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing directories when readOnly specified in the volumeSource [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Volumes": {}, + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should verify container cannot write to subpath readonly volumes [Slow] [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Volumes": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Volumes": {}, + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with mount options [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Disabled:Broken] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:Volumes": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource] [Disabled:Broken] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:Volumes": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": 
"[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with any volume data source [Serial] [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Volumes": {}, + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (default fs)] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource] [Disabled:Broken] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:Volumes": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source (ROX mode) [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source in parallel [Slow] [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (default fs)] provisioning should mount multiple PV pointing to the same storage on the same node [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with mount options [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + 
"Feature:Volumes": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Disabled:Broken] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:Volumes": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource] [Disabled:Broken] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:Volumes": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with any volume data source [Serial] [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Volumes": {}, + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (block volmode)] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource] [Disabled:Broken] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:Volumes": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source (ROX mode) [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Volumes": {}, + 
"sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source in parallel [Slow] [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (block volmode)] provisioning should mount multiple PV pointing to the same storage on the same node [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with mount options [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Volumes": {}, + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Disabled:Broken] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:Volumes": {}, + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource] [Disabled:Broken] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:Volumes": {}, + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with any volume data source [Serial] [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Volumes": {}, + "Feature:Windows": {}, + "Serial": {}, + 
"sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource] [Disabled:Broken] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:Volumes": {}, + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Volumes": {}, + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source (ROX mode) [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Volumes": {}, + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source in parallel [Slow] [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Volumes": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should mount multiple PV pointing to the same storage on the same node [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Volumes": {}, + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Volumes": 
{}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Disabled:Broken] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned 
PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSourceXFS": {}, + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree 
Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Disabled:Broken] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSourceXFS": {}, + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, 
+ "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Disabled:Broken] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "otherNames": null, + "labels": { + 
"Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSourceXFS": {}, + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and 
retain data across pod recreation on the same node [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Disabled:Broken] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSourceXFS": {}, + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": 
"[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] 
[Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Disabled:Broken] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSourceXFS": {}, + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": 
"openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Disabled:Broken] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSourceXFS": {}, + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + 
}, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single volume from pods on different node [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Volumes": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Volumes": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Volumes": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Volumes": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Volumes": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Disabled:Broken] 
[Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Volumes": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Disabled:Broken] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Feature:Volumes": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSourceXFS": {}, + "Feature:Volumes": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Volumes": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single volume from pods on different node [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Volumes": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (default fs)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": 
"blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (block volmode)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand Verify if offline PVC expansion works [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand Verify if offline PVC expansion works [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Volumes": {}, + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ntfs)(allowExpansion)] [Feature:Windows] volume-expand Verify if offline PVC expansion works [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + 
"Feature:Volumes": {}, + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (ntfs)(allowExpansion)] [Feature:Windows] volume-expand should resize volume when PVC is edited while pod is using it [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Volumes": {}, + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart. [Disabled:Broken] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Feature:Volumes": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux] [Disabled:Broken] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "Feature:Volumes": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux] [Disabled:Broken] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "Feature:Volumes": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux] [Disabled:Broken] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "Feature:Volumes": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": 
null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux] [Disabled:Broken] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "Feature:Volumes": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart. [Disabled:Broken] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Feature:Volumes": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux] [Disabled:Broken] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "Feature:Volumes": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux] [Disabled:Broken] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "Feature:Volumes": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux] [Disabled:Broken] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "Feature:Volumes": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + 
"memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux] [Disabled:Broken] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "Feature:Volumes": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart. [Disabled:Broken] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Feature:Volumes": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns. [Disabled:Broken] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Feature:Volumes": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns. [Disabled:Broken] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Feature:Volumes": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart. 
[Disabled:Broken] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Feature:Volumes": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns. [Disabled:Broken] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Feature:Volumes": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns. [Disabled:Broken] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Feature:Volumes": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (filesystem volmode)] volumeLimits should support volume limits [Serial] [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Volumes": {}, + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (filesystem volmode)] volumeLimits should verify that all csinodes have volume limits [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Generic Ephemeral-volume (default fs)] volumeLimits should support volume limits [Serial] [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Volumes": {}, + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Generic Ephemeral-volume (default fs)] volumeLimits should verify that all csinodes have volume limits [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + 
"Feature:Volumes": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (immediate binding)] topology should provision a volume and schedule a pod with AllowedTopologies [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (immediate binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (delayed binding)] topology should provision a volume and schedule a pod with AllowedTopologies [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (delayed binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (default fs)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial] [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Volumes": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (block volmode)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial] [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Volumes": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": 
"openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with same fsgroup applied to the volume contents [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with different fsgroup applied to the volume contents [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with same fsgroup skips ownership changes to the volume contents [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with different fsgroup applied to the volume contents [Disabled:Broken] [Suite:k8s]", + 
"otherNames": null, + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should create read-only inline ephemeral volume [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should create read/write inline ephemeral volume [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support two pods which have the same volume definition [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support multiple inline ephemeral volumes [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should create read-only inline ephemeral volume [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + 
"lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should create read/write inline ephemeral volume [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support two pods which have the same volume definition [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support multiple inline ephemeral volumes [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should create read-only inline ephemeral volume [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should create read/write inline ephemeral volume [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs 
[Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support two pods which have the same volume definition [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: iscsi] [Feature:Volumes] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support multiple inline ephemeral volumes [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Volumes": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (default fs)] capacity provides storage capacity information [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Inline-volume (default fs)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Inline-volume (default fs)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (default fs)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (default fs)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + 
"otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (default fs)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (default fs)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Inline-volume (ext3)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Inline-volume (ext3)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (ext3)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (ext3)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ext3)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + 
}, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ext3)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Inline-volume (ext4)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Inline-volume (ext4)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (ext4)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (ext4)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ext4)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ext4)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Inline-volume (xfs)] [Slow] volumes should store data [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + 
"isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Inline-volume (xfs)] [Slow] volumes should allow exec of files on the volume [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (xfs)] [Slow] volumes should store data [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (xfs)] [Slow] volumes should allow exec of files on the volume [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (xfs)] [Slow] volumes should store data [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (xfs)] [Slow] volumes should allow exec of files on the volume [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Inline-volume (ntfs)] [Feature:Windows] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Inline-volume (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: 
Pre-provisioned PV (ntfs)] [Feature:Windows] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (block volmode)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (block volmode)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Inline-volume (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (default fs)] volumeIO should write files of various 
sizes, verify size, validate content [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumeIO should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should fail to create pod by 
failing to mount volume [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (block volmode)] volumeMode should fail in binding dynamic provisioned PV to PVC [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Inline-volume (default fs)] subPath should support non-existent path [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Inline-volume (default fs)] subPath should support existing directory [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + 
"sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Inline-volume (default fs)] subPath should support existing single file [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Inline-volume (default fs)] subPath should support file as subpath [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Inline-volume (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Inline-volume (default fs)] subPath should support creating multiple subpath from same volumes [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null 
+ }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Inline-volume (default fs)] subPath should support restarting containers using directory as subpath [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Inline-volume (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Inline-volume (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Inline-volume (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Inline-volume (default fs)] subPath should support readOnly directory specified in the volumeMount [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Inline-volume (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Inline-volume (default fs)] subPath should support existing directories when readOnly specified in the volumeSource [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": 
null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Inline-volume (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Inline-volume (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (default fs)] subPath should support non-existent path [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing directory [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing single file [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (default fs)] subPath should support file as subpath [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + 
"memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (default fs)] subPath should support creating multiple subpath from same volumes [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (default fs)] subPath should support restarting containers using directory as subpath [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": 
"", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (default fs)] subPath should support readOnly directory specified in the volumeMount [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing directories when readOnly specified in the volumeSource [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (default fs)] subPath should support non-existent path [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + 
"isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (default fs)] subPath should support existing directory [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (default fs)] subPath should support existing single file [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (default fs)] subPath should support file as subpath [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": 
"openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (default fs)] subPath should support creating multiple subpath from same volumes [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (default fs)] subPath should support restarting containers using directory as subpath [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (default fs)] subPath should support readOnly directory specified in the volumeMount [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, 
+ "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (default fs)] subPath should support existing directories when readOnly specified in the volumeSource [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support non-existent path [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing directory [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing single file [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support file as subpath [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + 
"memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support creating multiple subpath from same volumes [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support restarting containers using directory as subpath [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + 
"Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support readOnly directory specified in the volumeMount [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support readOnly file specified in the volumeMount [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing directories when readOnly specified in the volumeSource [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": 
"openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with mount options [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with any volume data source [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (default fs)] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: 
hostPath] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source (ROX mode) [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source in parallel [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (default fs)] provisioning should mount multiple PV pointing to the same storage on the same node [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with mount options [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + 
"memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with any volume data source [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (block volmode)] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source (ROX mode) [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source in parallel [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (block volmode)] provisioning should mount multiple PV pointing to the same storage on the same node [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with mount options [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": 
null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with any volume data source [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source (ROX mode) 
[Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source in parallel [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should mount multiple PV pointing to the same storage on the same node [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + 
"duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] 
In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should 
concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": 
"openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: 
Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "otherNames": null, + "labels": { + 
"Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": 
"blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + 
"resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" 
+ }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSourceXFS": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + 
"source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (default fs)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (block volmode)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand Verify if offline PVC expansion works [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand Verify if offline PVC expansion works [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + 
"isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ntfs)(allowExpansion)] [Feature:Windows] volume-expand Verify if offline PVC expansion works [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (ntfs)(allowExpansion)] [Feature:Windows] volume-expand should resize volume when PVC is edited while pod is using it [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart. 
[Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart. 
[Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart. 
[Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart. [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns. 
[Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (filesystem volmode)] volumeLimits should support volume limits [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (filesystem volmode)] volumeLimits should verify that all csinodes have volume limits [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Generic Ephemeral-volume (default fs)] volumeLimits should support volume limits [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Generic Ephemeral-volume (default fs)] volumeLimits should verify that all csinodes have volume limits [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (immediate binding)] topology should provision a volume and schedule a pod with AllowedTopologies [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (immediate binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (delayed binding)] topology 
should provision a volume and schedule a pod with AllowedTopologies [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (delayed binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (default fs)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (block volmode)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with same fsgroup applied to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with different fsgroup applied to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} 
+ }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with same fsgroup skips ownership changes to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with different fsgroup applied to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should create read-only inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should create read/write inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": 
"", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support two pods which have the same volume definition [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support multiple inline ephemeral volumes [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should create read-only inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should create read/write inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support two pods which have the same volume definition [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support multiple inline ephemeral volumes [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": 
null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should create read-only inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should create read/write inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support two pods which have the same volume definition [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support multiple inline ephemeral volumes [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (default fs)] capacity provides storage capacity information [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Inline-volume (default fs)] volumes should store data 
[Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Inline-volume (default fs)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (default fs)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (default fs)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (default fs)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (default fs)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Inline-volume (ext3)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Inline-volume (ext3)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + 
"conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (ext3)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (ext3)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ext3)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ext3)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Inline-volume (ext4)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Inline-volume (ext4)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (ext4)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] 
[Testpattern: Pre-provisioned PV (ext4)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ext4)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ext4)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Inline-volume (xfs)] [Slow] volumes should store data [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Inline-volume (xfs)] [Slow] volumes should allow exec of files on the volume [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (xfs)] [Slow] volumes should store data [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (xfs)] [Slow] volumes should allow exec of files on the volume [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (xfs)] [Slow] volumes should store data [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + 
"timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (xfs)] [Slow] volumes should allow exec of files on the volume [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Inline-volume (ntfs)] [Feature:Windows] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Inline-volume (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (ntfs)] [Feature:Windows] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + 
"memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (block volmode)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (block volmode)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Inline-volume (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumeIO should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + 
"source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should fail to create pod by failing to mount volume [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (block volmode)] volumeMode should fail in binding dynamic provisioned PV to PVC [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + 
"timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Inline-volume (default fs)] subPath should support non-existent path [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Inline-volume (default fs)] subPath should support existing directory [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Inline-volume (default fs)] subPath should support existing single file [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Inline-volume (default fs)] subPath should support file as subpath [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": 
"blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Inline-volume (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Inline-volume (default fs)] subPath should support creating multiple subpath from same volumes [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Inline-volume (default fs)] subPath should support restarting containers using directory as subpath [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Inline-volume (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Inline-volume (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + 
"lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Inline-volume (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Inline-volume (default fs)] subPath should support readOnly directory specified in the volumeMount [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Inline-volume (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Inline-volume (default fs)] subPath should support existing directories when readOnly specified in the volumeSource [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Inline-volume (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Inline-volume (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (default fs)] subPath should support non-existent path [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + 
"duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing directory [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing single file [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (default fs)] subPath should support file as subpath [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": 
"" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (default fs)] subPath should support creating multiple subpath from same volumes [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (default fs)] subPath should support restarting containers using directory as subpath [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (default fs)] subPath should support readOnly directory specified in the volumeMount [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + 
"resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing directories when readOnly specified in the volumeSource [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (default fs)] subPath should support non-existent path [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (default fs)] subPath should support existing directory [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (default fs)] subPath should support existing single file [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (default fs)] subPath should support file as subpath [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + 
"isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (default fs)] subPath should support creating multiple subpath from same volumes [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (default fs)] subPath should support restarting containers using directory as subpath [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", 
+ "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (default fs)] subPath should support readOnly directory specified in the volumeMount [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (default fs)] subPath should support existing directories when readOnly specified in the volumeSource [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + 
"sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support non-existent path [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing directory [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing single file [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support file as subpath [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if 
non-existent subpath is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support creating multiple subpath from same volumes [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support restarting containers using directory as subpath [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + 
"conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support readOnly directory specified in the volumeMount [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support readOnly file specified in the volumeMount [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing directories when readOnly specified in the volumeSource [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with mount options [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (default fs)] provisioning should 
provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with any volume data source [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (default fs)] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source (ROX mode) [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source in parallel [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + 
"sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (default fs)] provisioning should mount multiple PV pointing to the same storage on the same node [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with mount options [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with any volume data source [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (block volmode)] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + 
"conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source (ROX mode) [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source in parallel [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (block volmode)] provisioning should mount multiple PV pointing to the same storage on the same node [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with mount options [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with snapshot 
data source (ROX mode) [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with any volume data source [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source (ROX mode) [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source in parallel [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should mount multiple PV pointing to the same storage on the 
same node [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "otherNames": null, + "labels": { + 
"Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + 
"timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes 
[Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] 
should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + 
"conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] 
In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single read-only volume from 
pods on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] 
In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ntfs)] 
[Feature:Windows] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSourceXFS": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] 
In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (default fs)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (block volmode)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand Verify if offline PVC expansion works [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand Verify if offline PVC expansion works [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + 
"duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ntfs)(allowExpansion)] [Feature:Windows] volume-expand Verify if offline PVC expansion works [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (ntfs)(allowExpansion)] [Feature:Windows] volume-expand should resize volume when PVC is edited while pod is using it [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart. 
[Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart. 
[Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart. 
[Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart. [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns. 
[Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (filesystem volmode)] volumeLimits should support volume limits [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (filesystem volmode)] volumeLimits should verify that all csinodes have volume limits [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Generic Ephemeral-volume (default fs)] volumeLimits should support volume limits [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Generic Ephemeral-volume (default fs)] volumeLimits should verify that all csinodes have volume limits [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (immediate binding)] topology should provision a volume and schedule a pod with AllowedTopologies [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (immediate binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] 
[Testpattern: Dynamic PV (delayed binding)] topology should provision a volume and schedule a pod with AllowedTopologies [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (delayed binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (default fs)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (block volmode)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with same fsgroup applied to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with different fsgroup applied to the volume contents 
[Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with same fsgroup skips ownership changes to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with different fsgroup applied to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should create read-only inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should create read/write inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + 
"labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support two pods which have the same volume definition [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support multiple inline ephemeral volumes [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should create read-only inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should create read/write inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support two pods which have the same volume definition [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] 
[Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support multiple inline ephemeral volumes [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should create read-only inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should create read/write inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support two pods which have the same volume definition [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support multiple inline ephemeral volumes [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (default fs)] capacity provides storage capacity information [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + 
"source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Inline-volume (default fs)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Inline-volume (default fs)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (default fs)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (default fs)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (default fs)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (default fs)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Inline-volume (ext3)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Inline-volume (ext3)] volumes should allow exec of files on the volume 
[Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (ext3)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (ext3)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ext3)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ext3)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Inline-volume (ext4)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Inline-volume (ext4)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (ext4)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": 
"openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (ext4)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ext4)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ext4)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Inline-volume (xfs)] [Slow] volumes should store data [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Inline-volume (xfs)] [Slow] volumes should allow exec of files on the volume [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (xfs)] [Slow] volumes should store data [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (xfs)] [Slow] volumes should allow exec of files on the volume [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (xfs)] [Slow] volumes should store data [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { 
+ "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (xfs)] [Slow] volumes should allow exec of files on the volume [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Inline-volume (ntfs)] [Feature:Windows] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Inline-volume (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (ntfs)] [Feature:Windows] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + 
"conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (block volmode)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (block volmode)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Inline-volume (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumeIO should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": 
"openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should fail to create pod by failing to mount volume [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (block volmode)] volumeMode should fail in binding dynamic provisioned PV to PVC [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + 
"lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Inline-volume (default fs)] subPath should support non-existent path [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Inline-volume (default fs)] subPath should support existing directory [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Inline-volume (default fs)] subPath should support existing single file [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Inline-volume (default fs)] subPath should support file as subpath [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Inline-volume 
(default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Inline-volume (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Inline-volume (default fs)] subPath should support creating multiple subpath from same volumes [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Inline-volume (default fs)] subPath should support restarting containers using directory as subpath [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Inline-volume (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Inline-volume (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Inline-volume (default fs)] subPath should unmount if pod 
is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Inline-volume (default fs)] subPath should support readOnly directory specified in the volumeMount [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Inline-volume (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Inline-volume (default fs)] subPath should support existing directories when readOnly specified in the volumeSource [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Inline-volume (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Inline-volume (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (default fs)] subPath should support non-existent path [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV 
(default fs)] subPath should support existing directory [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing single file [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (default fs)] subPath should support file as subpath [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (default fs)] subPath should support creating multiple subpath from same 
volumes [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (default fs)] subPath should support restarting containers using directory as subpath [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (default fs)] subPath should support readOnly directory specified in the volumeMount [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV 
(default fs)] subPath should support existing directories when readOnly specified in the volumeSource [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (default fs)] subPath should support non-existent path [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (default fs)] subPath should support existing directory [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (default fs)] subPath should support existing single file [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (default fs)] subPath should support file as subpath [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath directory is outside the volume 
[Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (default fs)] subPath should support creating multiple subpath from same volumes [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (default fs)] subPath should support restarting containers using directory as subpath [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": 
{}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (default fs)] subPath should support readOnly directory specified in the volumeMount [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (default fs)] subPath should support existing directories when readOnly specified in the volumeSource [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support non-existent path [Suite:openshift/conformance/parallel] [Suite:k8s]", 
+ "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing directory [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing single file [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support file as subpath [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ntfs)] 
[Feature:Windows] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support creating multiple subpath from same volumes [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support restarting containers using directory as subpath [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support readOnly directory specified in the volumeMount [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + 
"mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support readOnly file specified in the volumeMount [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing directories when readOnly specified in the volumeSource [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with mount options [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (default 
fs)] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with any volume data source [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (default fs)] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source (ROX mode) [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source in parallel [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (default fs)] provisioning should mount multiple PV pointing to the same storage on the same node [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + 
}, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with mount options [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with any volume data source [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (block volmode)] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] 
In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source (ROX mode) [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source in parallel [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (block volmode)] provisioning should mount multiple PV pointing to the same storage on the same node [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with mount options [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with any volume data source [Serial] [Suite:openshift/conformance/serial] 
[Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source (ROX mode) [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source in parallel [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should mount multiple PV pointing to the same storage on the same node [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + 
"mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": 
"", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + 
"otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + 
"conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + 
"lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + 
"otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": 
"blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + 
"otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + 
"memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ntfs)] 
[Feature:Windows] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSourceXFS": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (default fs)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (block volmode)] volume-expand should not allow expansion of pvcs 
without AllowVolumeExpansion property [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand Verify if offline PVC expansion works [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand Verify if offline PVC expansion works [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ntfs)(allowExpansion)] [Feature:Windows] volume-expand Verify if offline PVC expansion works [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": 
"[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (ntfs)(allowExpansion)] [Feature:Windows] volume-expand should resize volume when PVC is edited while pod is using it [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart. [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + 
"Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart. [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": 
"[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart. [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart. [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns. 
[Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (filesystem volmode)] volumeLimits should support volume limits [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (filesystem volmode)] volumeLimits should verify that all csinodes have volume limits [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Generic Ephemeral-volume (default fs)] volumeLimits should support volume limits [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Generic Ephemeral-volume (default fs)] volumeLimits should verify that all csinodes have volume limits [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (immediate binding)] topology should provision a volume and schedule a pod with AllowedTopologies [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (immediate binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (delayed binding)] topology 
should provision a volume and schedule a pod with AllowedTopologies [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (delayed binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (default fs)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (block volmode)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with same fsgroup applied to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with different fsgroup applied to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} 
+ }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with same fsgroup skips ownership changes to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with different fsgroup applied to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should create read-only inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should create read/write inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": 
"", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support two pods which have the same volume definition [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support multiple inline ephemeral volumes [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should create read-only inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should create read/write inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support two pods which have the same volume definition [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support multiple inline ephemeral volumes [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": 
null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should create read-only inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should create read/write inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support two pods which have the same volume definition [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support multiple inline ephemeral volumes [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)] capacity provides storage capacity information [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Inline-volume (default fs)] volumes should store data 
[Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Inline-volume (default fs)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (default fs)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (default fs)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Inline-volume (ext3)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Inline-volume (ext3)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + 
"timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (ext3)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (ext3)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ext3)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ext3)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Inline-volume (ext4)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Inline-volume (ext4)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (ext4)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (ext4)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] 
[Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ext4)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ext4)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Inline-volume (xfs)] [Slow] volumes should store data [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Inline-volume (xfs)] [Slow] volumes should allow exec of files on the volume [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (xfs)] [Slow] volumes should store data [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (xfs)] [Slow] volumes should allow exec of files on the volume [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (xfs)] [Slow] volumes should store data [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (xfs)] [Slow] 
volumes should allow exec of files on the volume [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Inline-volume (ntfs)] [Feature:Windows] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Inline-volume (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (ntfs)] [Feature:Windows] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (block volmode)] volumes should store data 
[Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (block volmode)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Inline-volume (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumeIO should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": 
null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Inline-volume (default fs)] subPath should support non-existent path [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + 
"isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Inline-volume (default fs)] subPath should support existing directory [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Inline-volume (default fs)] subPath should support existing single file [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Inline-volume (default fs)] subPath should support file as subpath [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Inline-volume (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": 
"openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Inline-volume (default fs)] subPath should support creating multiple subpath from same volumes [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Inline-volume (default fs)] subPath should support restarting containers using directory as subpath [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Inline-volume (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Inline-volume (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Inline-volume (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Inline-volume (default fs)] subPath should support readOnly directory specified in the volumeMount [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Inline-volume (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": 
"" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Inline-volume (default fs)] subPath should support existing directories when readOnly specified in the volumeSource [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Inline-volume (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Inline-volume (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (default fs)] subPath should support non-existent path [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing directory [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing single file [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (default fs)] subPath should support file as subpath [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": 
"blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (default fs)] subPath should support creating multiple subpath from same volumes [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (default fs)] subPath should support restarting containers using directory as subpath [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: 
cinder] [Testpattern: Pre-provisioned PV (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (default fs)] subPath should support readOnly directory specified in the volumeMount [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing directories when readOnly specified in the volumeSource [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + 
"source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)] subPath should support non-existent path [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)] subPath should support existing directory [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)] subPath should support existing single file [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)] subPath should support file as subpath [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default 
fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)] subPath should support creating multiple subpath from same volumes [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)] subPath should support restarting containers using directory as subpath [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)] subPath should support readOnly directory specified in the volumeMount [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)] subPath 
should support readOnly file specified in the volumeMount [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)] subPath should support existing directories when readOnly specified in the volumeSource [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support non-existent path [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing directory [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing single file [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic 
PV (ntfs)] [Feature:Windows] subPath should support file as subpath [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support creating multiple subpath from same volumes [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support restarting containers using directory as subpath [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": 
"openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support readOnly directory specified in the volumeMount [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support readOnly file specified in the volumeMount [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing directories when readOnly specified in the volumeSource [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should verify container cannot write to 
subpath readonly volumes [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with mount options [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with any volume data source [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + 
"sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source (ROX mode) [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source in parallel [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)] provisioning should mount multiple PV pointing to the same storage on the same node [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with mount options [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource] 
[Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with any volume data source [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (block volmode)] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source (ROX mode) [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source in parallel [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (block volmode)] provisioning should mount multiple PV pointing to the same storage on the same node [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + 
"lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with mount options [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with any volume data source [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": 
"openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source (ROX mode) [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source in parallel [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should mount multiple PV pointing to the same storage on the same node [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod 
recreation on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + 
"isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": 
"openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + 
"sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + 
"timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV 
(block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + 
"mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation 
on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": 
"", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSourceXFS": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + 
"mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (block volmode)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand Verify if offline PVC expansion works [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand Verify if offline PVC expansion works [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": 
null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ntfs)(allowExpansion)] [Feature:Windows] volume-expand Verify if offline PVC expansion works [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (ntfs)(allowExpansion)] [Feature:Windows] volume-expand should resize volume when PVC is edited while pod is using it [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart. 
[Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart. 
[Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart. 
[Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart. [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns. 
[Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (filesystem volmode)] volumeLimits should support volume limits [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (filesystem volmode)] volumeLimits should verify that all csinodes have volume limits [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Generic Ephemeral-volume (default fs)] volumeLimits should support volume limits [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Generic Ephemeral-volume (default fs)] volumeLimits should verify that all csinodes have volume limits [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (immediate binding)] topology should provision a volume and schedule a pod with AllowedTopologies [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (immediate binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (delayed binding)] topology should provision 
a volume and schedule a pod with AllowedTopologies [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (delayed binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (block volmode)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with same fsgroup applied to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with different fsgroup applied to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + 
"resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with same fsgroup skips ownership changes to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with different fsgroup applied to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should create read-only inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should create read/write inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": 
"openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support two pods which have the same volume definition [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support multiple inline ephemeral volumes [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should create read-only inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should create read/write inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support two pods which have the same volume definition [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support multiple inline ephemeral volumes [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + 
"tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should create read-only inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should create read/write inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support two pods which have the same volume definition [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: cinder] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support multiple inline ephemeral volumes [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (default fs)] capacity provides storage capacity information [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Inline-volume (default fs)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + 
"sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Inline-volume (default fs)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (default fs)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (default fs)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (default fs)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (default fs)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Inline-volume (ext3)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Inline-volume (ext3)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + 
}, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (ext3)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (ext3)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ext3)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ext3)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Inline-volume (ext4)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Inline-volume (ext4)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (ext4)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (ext4)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": 
null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ext4)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ext4)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Inline-volume (xfs)] [Slow] volumes should store data [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Inline-volume (xfs)] [Slow] volumes should allow exec of files on the volume [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (xfs)] [Slow] volumes should store data [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (xfs)] [Slow] volumes should allow exec of files on the volume [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (xfs)] [Slow] volumes should store data [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (xfs)] [Slow] volumes should allow exec of files on the volume [Suite:k8s]", + "otherNames": 
null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Inline-volume (ntfs)] [Feature:Windows] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Inline-volume (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (ntfs)] [Feature:Windows] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (block volmode)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": 
null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (block volmode)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Inline-volume (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumeIO should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, 
+ "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Inline-volume (default fs)] subPath should support non-existent path [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, 
+ "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Inline-volume (default fs)] subPath should support existing directory [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Inline-volume (default fs)] subPath should support existing single file [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Inline-volume (default fs)] subPath should support file as subpath [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Inline-volume (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": 
"blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Inline-volume (default fs)] subPath should support creating multiple subpath from same volumes [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Inline-volume (default fs)] subPath should support restarting containers using directory as subpath [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Inline-volume (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Inline-volume (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Inline-volume (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Inline-volume (default fs)] subPath should support readOnly directory specified in the volumeMount [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Inline-volume (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": 
"openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Inline-volume (default fs)] subPath should support existing directories when readOnly specified in the volumeSource [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Inline-volume (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Inline-volume (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (default fs)] subPath should support non-existent path [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing directory [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing single file [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (default fs)] subPath should support file as subpath [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + 
{ + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (default fs)] subPath should support creating multiple subpath from same volumes [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (default fs)] subPath should support restarting containers using directory as subpath [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] 
[Testpattern: Pre-provisioned PV (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (default fs)] subPath should support readOnly directory specified in the volumeMount [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing directories when readOnly specified in the volumeSource [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": 
"openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (default fs)] subPath should support non-existent path [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (default fs)] subPath should support existing directory [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (default fs)] subPath should support existing single file [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (default fs)] subPath should support file as subpath [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (default fs)] 
subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (default fs)] subPath should support creating multiple subpath from same volumes [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (default fs)] subPath should support restarting containers using directory as subpath [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (default fs)] subPath should support readOnly directory specified in the volumeMount [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (default fs)] subPath 
should support readOnly file specified in the volumeMount [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (default fs)] subPath should support existing directories when readOnly specified in the volumeSource [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support non-existent path [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing directory [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing single file [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: 
Dynamic PV (ntfs)] [Feature:Windows] subPath should support file as subpath [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support creating multiple subpath from same volumes [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support restarting containers using directory as subpath [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": 
"openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support readOnly directory specified in the volumeMount [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support readOnly file specified in the volumeMount [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing directories when readOnly specified in the volumeSource [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should verify container cannot write 
to subpath readonly volumes [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with mount options [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with any volume data source [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (default fs)] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": 
{}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source (ROX mode) [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source in parallel [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (default fs)] provisioning should mount multiple PV pointing to the same storage on the same node [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with mount options [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source (ROX mode) 
[Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with any volume data source [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (block volmode)] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source (ROX mode) [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source in parallel [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (block volmode)] provisioning should mount multiple PV pointing to the same storage on the same node [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + 
"source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with mount options [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with any volume data source [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + 
"duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source (ROX mode) [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source in parallel [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should mount multiple PV pointing to the same storage on the same node [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes 
with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + 
"sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + 
"memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node 
[Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": 
"", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": 
"[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": 
{}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should 
access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + 
"tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSourceXFS": {}, + 
"Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (default fs)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (block volmode)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand Verify if offline PVC expansion works [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] 
volume-expand Verify if offline PVC expansion works [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ntfs)(allowExpansion)] [Feature:Windows] volume-expand Verify if offline PVC expansion works [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (ntfs)(allowExpansion)] [Feature:Windows] volume-expand should resize volume when PVC is edited while pod is using it [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart. 
[Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart. 
[Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart. 
[Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart. [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns. 
[Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (filesystem volmode)] volumeLimits should support volume limits [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (filesystem volmode)] volumeLimits should verify that all csinodes have volume limits [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Generic Ephemeral-volume (default fs)] volumeLimits should support volume limits [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Generic Ephemeral-volume (default fs)] volumeLimits should verify that all csinodes have volume limits [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (immediate binding)] topology should provision a volume and schedule a pod with AllowedTopologies [Disabled:Unsupported] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (immediate binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies [Disabled:Unsupported] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (delayed binding)] topology should provision a volume and schedule a 
pod with AllowedTopologies [Disabled:Unsupported] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (delayed binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies [Disabled:Unsupported] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (default fs)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (block volmode)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with same fsgroup applied to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with different fsgroup applied to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + 
"conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with same fsgroup skips ownership changes to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with different fsgroup applied to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should create read-only inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should create read/write inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + 
"lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support two pods which have the same volume definition [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support multiple inline ephemeral volumes [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should create read-only inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should create read/write inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support two pods which have the same volume definition [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support multiple inline ephemeral volumes [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + 
"isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should create read-only inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should create read/write inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support two pods which have the same volume definition [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: vsphere] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support multiple inline ephemeral volumes [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] capacity provides storage capacity information [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Inline-volume (default fs)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + 
}, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Inline-volume (default fs)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (default fs)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (default fs)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Inline-volume (ext3)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Inline-volume (ext3)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" 
+ }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (ext3)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (ext3)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ext3)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ext3)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Inline-volume (ext4)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Inline-volume (ext4)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (ext4)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (ext4)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + 
"sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ext4)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ext4)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Inline-volume (xfs)] [Slow] volumes should store data [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Inline-volume (xfs)] [Slow] volumes should allow exec of files on the volume [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (xfs)] [Slow] volumes should store data [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (xfs)] [Slow] volumes should allow exec of files on the volume [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (xfs)] [Slow] volumes should store data [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (xfs)] [Slow] volumes should allow 
exec of files on the volume [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Inline-volume (ntfs)] [Feature:Windows] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Inline-volume (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (ntfs)] [Feature:Windows] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (block volmode)] volumes should 
store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (block volmode)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Inline-volume (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumeIO should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] 
[Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Inline-volume (default fs)] subPath should support non-existent path [Suite:openshift/conformance/parallel] 
[Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Inline-volume (default fs)] subPath should support existing directory [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Inline-volume (default fs)] subPath should support existing single file [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Inline-volume (default fs)] subPath should support file as subpath [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Inline-volume (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + 
"resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Inline-volume (default fs)] subPath should support creating multiple subpath from same volumes [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Inline-volume (default fs)] subPath should support restarting containers using directory as subpath [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Inline-volume (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Inline-volume (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Inline-volume (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Inline-volume (default fs)] subPath should support readOnly directory specified in the volumeMount [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Inline-volume (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + 
"labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Inline-volume (default fs)] subPath should support existing directories when readOnly specified in the volumeSource [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Inline-volume (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Inline-volume (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (default fs)] subPath should support non-existent path [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing directory [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing single file [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (default fs)] subPath should support file as subpath [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + 
"tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (default fs)] subPath should support creating multiple subpath from same volumes [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (default fs)] subPath should support restarting containers using directory as subpath [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { 
+ "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (default fs)] subPath should support readOnly directory specified in the volumeMount [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing directories when readOnly specified in the volumeSource [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] 
[Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] subPath should support non-existent path [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] subPath should support existing directory [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] subPath should support existing single file [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] subPath should support file as subpath [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + 
"resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] subPath should support creating multiple subpath from same volumes [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] subPath should support restarting containers using directory as subpath [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] subPath should support readOnly directory specified in the volumeMount [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + 
"tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] subPath should support existing directories when readOnly specified in the volumeSource [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support non-existent path [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing directory [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing single file [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": 
{ + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support file as subpath [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support creating multiple subpath from same volumes [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ntfs)] 
[Feature:Windows] subPath should support restarting containers using directory as subpath [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support readOnly directory specified in the volumeMount [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support readOnly file specified in the volumeMount [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing directories when readOnly specified in the volumeSource [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": 
{} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with mount options [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with any volume data source [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" 
+ }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source (ROX mode) [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source in parallel [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] provisioning should mount multiple PV pointing to the same storage on the same node [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with mount options [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + 
"tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with any volume data source [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (block volmode)] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source (ROX mode) [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source in parallel [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree 
Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (block volmode)] provisioning should mount multiple PV pointing to the same storage on the same node [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with mount options [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with any volume data source [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": 
"blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source (ROX mode) [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source in parallel [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should mount multiple PV pointing to the same storage on the same node [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + 
"otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": 
{ + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + 
"lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node 
[Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + 
"conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + 
"lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + 
}, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] 
[Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "otherNames": null, + "labels": 
{ + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + 
"Feature:VolumeSourceXFS": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSourceXFS": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (block volmode)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand Verify if offline PVC expansion works [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": 
"blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand Verify if offline PVC expansion works [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ntfs)(allowExpansion)] [Feature:Windows] volume-expand Verify if offline PVC expansion works [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (ntfs)(allowExpansion)] [Feature:Windows] volume-expand should resize volume when PVC is edited while pod is using it [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart. 
[Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart. 
[Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart. 
[Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart. [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns. 
[Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (filesystem volmode)] volumeLimits should support volume limits [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (filesystem volmode)] volumeLimits should verify that all csinodes have volume limits [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Generic Ephemeral-volume (default fs)] volumeLimits should support volume limits [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Generic Ephemeral-volume (default fs)] volumeLimits should verify that all csinodes have volume limits [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (immediate binding)] topology should provision a volume and schedule a pod with AllowedTopologies [Skipped:azure] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (immediate binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV 
(delayed binding)] topology should provision a volume and schedule a pod with AllowedTopologies [Skipped:azure] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (delayed binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (block volmode)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with same fsgroup applied to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with different fsgroup applied to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", 
+ "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with same fsgroup skips ownership changes to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with different fsgroup applied to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should create read-only inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should create read/write inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + 
"mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support two pods which have the same volume definition [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support multiple inline ephemeral volumes [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should create read-only inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should create read/write inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support two pods which have the same volume definition [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support multiple inline ephemeral 
volumes [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should create read-only inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should create read/write inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support two pods which have the same volume definition [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-disk] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support multiple inline ephemeral volumes [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (default fs)] capacity provides storage capacity information [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] 
[Testpattern: Inline-volume (default fs)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Inline-volume (default fs)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (default fs)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (default fs)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (default fs)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (default fs)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Inline-volume (ext3)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Inline-volume (ext3)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { 
+ "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (ext3)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (ext3)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ext3)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ext3)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Inline-volume (ext4)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Inline-volume (ext4)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (ext4)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: 
Pre-provisioned PV (ext4)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ext4)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ext4)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Inline-volume (xfs)] [Slow] volumes should store data [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Inline-volume (xfs)] [Slow] volumes should allow exec of files on the volume [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (xfs)] [Slow] volumes should store data [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (xfs)] [Slow] volumes should allow exec of files on the volume [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (xfs)] [Slow] volumes should store data [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": 
"openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (xfs)] [Slow] volumes should allow exec of files on the volume [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Inline-volume (ntfs)] [Feature:Windows] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Inline-volume (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (ntfs)] [Feature:Windows] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": 
"openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (block volmode)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (block volmode)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Inline-volume (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumeIO should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": 
"[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should fail to create pod by failing to mount volume [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (block volmode)] volumeMode should fail in binding dynamic provisioned PV to PVC [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] 
In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Inline-volume (default fs)] subPath should support non-existent path [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Inline-volume (default fs)] subPath should support existing directory [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Inline-volume (default fs)] subPath should support existing single file [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Inline-volume (default fs)] subPath should support file as subpath [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath 
file is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Inline-volume (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Inline-volume (default fs)] subPath should support creating multiple subpath from same volumes [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Inline-volume (default fs)] subPath should support restarting containers using directory as subpath [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Inline-volume (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Inline-volume (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Inline-volume (default fs)] subPath should unmount if pod is force deleted while kubelet 
is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Inline-volume (default fs)] subPath should support readOnly directory specified in the volumeMount [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Inline-volume (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Inline-volume (default fs)] subPath should support existing directories when readOnly specified in the volumeSource [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Inline-volume (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Inline-volume (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (default fs)] subPath should support non-existent path [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (default fs)] subPath 
should support existing directory [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing single file [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (default fs)] subPath should support file as subpath [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (default fs)] subPath should support creating multiple subpath from same volumes 
[Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (default fs)] subPath should support restarting containers using directory as subpath [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (default fs)] subPath should support readOnly directory specified in the volumeMount [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV 
(default fs)] subPath should support existing directories when readOnly specified in the volumeSource [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (default fs)] subPath should support non-existent path [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (default fs)] subPath should support existing directory [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (default fs)] subPath should support existing single file [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (default fs)] subPath should support file as subpath [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath directory is outside 
the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (default fs)] subPath should support creating multiple subpath from same volumes [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (default fs)] subPath should support restarting containers using directory as subpath [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "otherNames": null, + 
"labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (default fs)] subPath should support readOnly directory specified in the volumeMount [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (default fs)] subPath should support existing directories when readOnly specified in the volumeSource [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support non-existent path 
[Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing directory [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing single file [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support file as subpath [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree 
Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support creating multiple subpath from same volumes [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support restarting containers using directory as subpath [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support readOnly directory specified in the volumeMount [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + 
"sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support readOnly file specified in the volumeMount [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing directories when readOnly specified in the volumeSource [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with mount options [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": 
"[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with any volume data source [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (default fs)] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source (ROX mode) [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source in parallel [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (default fs)] provisioning should mount multiple PV pointing to the same storage on the same node [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + 
"sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with mount options [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with any volume data source [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (block volmode)] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + 
"timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source (ROX mode) [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source in parallel [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (block volmode)] provisioning should mount multiple PV pointing to the same storage on the same node [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with mount options [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ntfs)] 
[Feature:Windows] provisioning should provision storage with any volume data source [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source (ROX mode) [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source in parallel [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should mount multiple PV pointing to the same storage on the same node [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node 
[Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSourceXFS": {}, + 
"Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: 
azure-file] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod 
recreation on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} 
+ }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should 
concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + 
}, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] 
In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] 
[Feature:VolumeSourceXFS] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": 
null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSourceXFS": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (default fs)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + 
"timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (block volmode)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand Verify if offline PVC expansion works [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand Verify if offline PVC expansion works [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ntfs)(allowExpansion)] [Feature:Windows] volume-expand Verify if offline PVC expansion works [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + 
"sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (ntfs)(allowExpansion)] [Feature:Windows] volume-expand should resize volume when PVC is edited while pod is using it [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart. [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] 
[LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart. [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + 
"LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart. [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart. [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns. 
[Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (filesystem volmode)] volumeLimits should support volume limits [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (filesystem volmode)] volumeLimits should verify that all csinodes have volume limits [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Generic Ephemeral-volume (default fs)] volumeLimits should support volume limits [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Generic Ephemeral-volume (default fs)] volumeLimits should verify that all csinodes have volume limits [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (immediate binding)] topology should provision a volume and schedule a pod with AllowedTopologies [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (immediate binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (delayed 
binding)] topology should provision a volume and schedule a pod with AllowedTopologies [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (delayed binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (default fs)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (block volmode)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with same fsgroup applied to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with different fsgroup applied to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + 
"labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with same fsgroup skips ownership changes to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with different fsgroup applied to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should create read-only inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should create read/write inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + 
"conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support two pods which have the same volume definition [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support multiple inline ephemeral volumes [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should create read-only inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should create read/write inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support two pods which have the same volume definition [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support multiple inline ephemeral volumes 
[Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should create read-only inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should create read/write inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support two pods which have the same volume definition [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: azure-file] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support multiple inline ephemeral volumes [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (default fs)] capacity provides storage capacity information [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] 
[LocalVolumeType: dir] [Testpattern: Inline-volume (default fs)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Inline-volume (default fs)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (default fs)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (default fs)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (default fs)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (default fs)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Inline-volume (ext3)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Inline-volume (ext3)] volumes should allow exec of files on the 
volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (ext3)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (ext3)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ext3)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ext3)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Inline-volume (ext4)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Inline-volume (ext4)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (ext4)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, 
+ "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (ext4)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ext4)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ext4)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Inline-volume (xfs)] [Slow] volumes should store data [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Inline-volume (xfs)] [Slow] volumes should allow exec of files on the volume [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (xfs)] [Slow] volumes should store data [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (xfs)] [Slow] volumes should allow exec of files on the volume [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": 
"blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (xfs)] [Slow] volumes should store data [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (xfs)] [Slow] volumes should allow exec of files on the volume [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Inline-volume (ntfs)] [Feature:Windows] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Inline-volume (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (ntfs)] [Feature:Windows] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": 
"openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (block volmode)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (block volmode)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Inline-volume (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumeIO should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": 
"", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should fail to create pod by failing to mount volume [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + 
"sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (block volmode)] volumeMode should fail in binding dynamic provisioned PV to PVC [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Inline-volume (default fs)] subPath should support non-existent path [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Inline-volume (default fs)] subPath should support existing directory [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Inline-volume (default fs)] subPath should support existing single file [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Inline-volume (default fs)] subPath should support file as subpath [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + 
"labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Inline-volume (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Inline-volume (default fs)] subPath should support creating multiple subpath from same volumes [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Inline-volume (default fs)] subPath should support restarting containers using directory as subpath [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Inline-volume (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly] 
[Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Inline-volume (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Inline-volume (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Inline-volume (default fs)] subPath should support readOnly directory specified in the volumeMount [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Inline-volume (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Inline-volume (default fs)] subPath should support existing directories when readOnly specified in the volumeSource [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Inline-volume (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, 
+ { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Inline-volume (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (default fs)] subPath should support non-existent path [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing directory [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing single file [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (default fs)] subPath should support file as subpath [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": 
"openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (default fs)] subPath should support creating multiple subpath from same volumes [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (default fs)] subPath should support restarting containers using directory as subpath [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Slow": 
{}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (default fs)] subPath should support readOnly directory specified in the volumeMount [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing directories when readOnly specified in the volumeSource [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (default fs)] subPath should support non-existent path [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (default fs)] 
subPath should support existing directory [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (default fs)] subPath should support existing single file [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (default fs)] subPath should support file as subpath [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV 
(default fs)] subPath should support creating multiple subpath from same volumes [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (default fs)] subPath should support restarting containers using directory as subpath [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (default fs)] subPath should support readOnly directory specified in the volumeMount [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": 
"openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (default fs)] subPath should support existing directories when readOnly specified in the volumeSource [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support non-existent path [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing directory [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing single file [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support file as subpath [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + 
"Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support creating multiple subpath from same volumes [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support restarting containers using directory as subpath [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + 
"name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support readOnly directory specified in the volumeMount [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support readOnly file specified in the volumeMount [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing directories when readOnly specified in the volumeSource [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: 
Dynamic PV (ntfs)] [Feature:Windows] subPath should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with mount options [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with any volume data source [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (default fs)] provisioning should provision correct filesystem size when restoring snapshot to 
larger size pvc [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source (ROX mode) [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source in parallel [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (default fs)] provisioning should mount multiple PV pointing to the same storage on the same node [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with mount options [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, 
+ "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with any volume data source [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (block volmode)] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source (ROX mode) [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source in parallel [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": 
"[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (block volmode)] provisioning should mount multiple PV pointing to the same storage on the same node [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with mount options [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with any volume data source [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": 
null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source (ROX mode) [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source in parallel [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should mount multiple PV pointing to the same storage on the same node [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree 
Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + 
"name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the 
same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod 
recreation on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + 
"otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": 
{} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": 
"[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to 
two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] 
[Feature:VolumeSourceXFS] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": 
{}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSourceXFS": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (default fs)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property 
[Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (block volmode)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand Verify if offline PVC expansion works [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand Verify if offline PVC expansion works [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": 
"openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ntfs)(allowExpansion)] [Feature:Windows] volume-expand Verify if offline PVC expansion works [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (ntfs)(allowExpansion)] [Feature:Windows] volume-expand should resize volume when PVC is edited while pod is using it [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart. [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "otherNames": null, + 
"labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart. [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": 
"", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart. [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart. [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns. 
[Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (filesystem volmode)] volumeLimits should support volume limits [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (filesystem volmode)] volumeLimits should verify that all csinodes have volume limits [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Generic Ephemeral-volume (default fs)] volumeLimits should support volume limits [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Generic Ephemeral-volume (default fs)] volumeLimits should verify that all csinodes have volume limits [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (immediate binding)] topology should provision a volume and schedule a pod with AllowedTopologies [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": 
"openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (immediate binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (delayed binding)] topology should provision a volume and schedule a pod with AllowedTopologies [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (delayed binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (default fs)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (block volmode)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, 
volume contents ownership changed via chgrp in first pod, new pod with same fsgroup applied to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with different fsgroup applied to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with same fsgroup skips ownership changes to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with different fsgroup applied to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should create read-only inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + 
{ + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should create read/write inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support two pods which have the same volume definition [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support multiple inline ephemeral volumes [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should create read-only inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should create read/write inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs [Suite:openshift/conformance/parallel] 
[Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support two pods which have the same volume definition [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support multiple inline ephemeral volumes [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should create read-only inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should create read/write inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support two pods which have the same volume definition [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": 
"openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support multiple inline ephemeral volumes [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (default fs)] capacity provides storage capacity information [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Inline-volume (default fs)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Inline-volume (default fs)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (default fs)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (default fs)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (default fs)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": 
"openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (default fs)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Inline-volume (ext3)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Inline-volume (ext3)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (ext3)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (ext3)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ext3)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ext3)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": 
"[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Inline-volume (ext4)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Inline-volume (ext4)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (ext4)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (ext4)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ext4)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ext4)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Inline-volume (xfs)] [Slow] volumes should store data [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Inline-volume (xfs)] [Slow] volumes 
should allow exec of files on the volume [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (xfs)] [Slow] volumes should store data [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (xfs)] [Slow] volumes should allow exec of files on the volume [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (xfs)] [Slow] volumes should store data [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (xfs)] [Slow] volumes should allow exec of files on the volume [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Inline-volume (ntfs)] [Feature:Windows] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Inline-volume (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (ntfs)] [Feature:Windows] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + 
"otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (block volmode)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (block volmode)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Inline-volume (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV 
(default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumeIO should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": 
"openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should fail to create pod by failing to mount volume [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (block volmode)] volumeMode should fail in binding dynamic provisioned PV to PVC [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Inline-volume (default fs)] subPath should support non-existent path [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", 
+ "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Inline-volume (default fs)] subPath should support existing directory [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Inline-volume (default fs)] subPath should support existing single file [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Inline-volume (default fs)] subPath should support file as subpath [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Inline-volume (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + 
"tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Inline-volume (default fs)] subPath should support creating multiple subpath from same volumes [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Inline-volume (default fs)] subPath should support restarting containers using directory as subpath [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Inline-volume (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Inline-volume (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Inline-volume (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Inline-volume (default fs)] subPath should support readOnly directory specified in the volumeMount [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Inline-volume 
(default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Inline-volume (default fs)] subPath should support existing directories when readOnly specified in the volumeSource [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Inline-volume (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Inline-volume (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (default fs)] subPath should support non-existent path [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing directory [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing single file [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + 
"lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (default fs)] subPath should support file as subpath [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (default fs)] subPath should support creating multiple subpath from same volumes [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (default fs)] subPath should support restarting containers using directory as subpath [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + 
}, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (default fs)] subPath should support readOnly directory specified in the volumeMount [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing directories when readOnly specified in the volumeSource [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: 
Pre-provisioned PV (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (default fs)] subPath should support non-existent path [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (default fs)] subPath should support existing directory [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (default fs)] subPath should support existing single file [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (default fs)] subPath should support file as subpath [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree 
Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (default fs)] subPath should support creating multiple subpath from same volumes [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (default fs)] subPath should support restarting containers using directory as subpath [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + 
"source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (default fs)] subPath should support readOnly directory specified in the volumeMount [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (default fs)] subPath should support existing directories when readOnly specified in the volumeSource [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support non-existent path [Suite:openshift/conformance/parallel] [Suite:k8s]", + 
"otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing directory [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing single file [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support file as subpath [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": 
"openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support creating multiple subpath from same volumes [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support restarting containers using directory as subpath [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] 
[LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support readOnly directory specified in the volumeMount [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support readOnly file specified in the volumeMount [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing directories when readOnly specified in the volumeSource [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with mount options [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] 
[Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with any volume data source [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (default fs)] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source (ROX mode) [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source in parallel [Slow] [Suite:k8s]", + "otherNames": null, + 
"labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (default fs)] provisioning should mount multiple PV pointing to the same storage on the same node [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with mount options [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with any volume data source [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (block volmode)] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + 
"Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source (ROX mode) [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source in parallel [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (block volmode)] provisioning should mount multiple PV pointing to the same storage on the same node [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with mount options [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + 
"source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with any volume data source [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source (ROX mode) [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source in parallel [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": 
"", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should mount multiple PV pointing to the same storage on the same node [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": 
"openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": 
"", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + 
"duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] 
[LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: 
local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] 
[LocalVolumeType: dir-link] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] 
[Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + 
"mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + 
"timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + 
"Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSourceXFS": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (default fs)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (block volmode)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand Verify if offline PVC expansion works [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + 
"memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand Verify if offline PVC expansion works [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ntfs)(allowExpansion)] [Feature:Windows] volume-expand Verify if offline PVC expansion works [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (ntfs)(allowExpansion)] [Feature:Windows] volume-expand should resize volume when PVC is edited while pod is using it [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: 
local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart. [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written 
before kubelet restart is readable after restart. [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart. 
[Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart. [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns. 
[Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (filesystem volmode)] volumeLimits should support volume limits [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (filesystem volmode)] volumeLimits should verify that all csinodes have volume limits [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Generic Ephemeral-volume (default fs)] volumeLimits should support volume limits [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Generic Ephemeral-volume (default fs)] volumeLimits should verify that all csinodes have volume limits [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (immediate binding)] topology should provision a volume and schedule a pod with AllowedTopologies [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (immediate binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + 
"lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (delayed binding)] topology should provision a volume and schedule a pod with AllowedTopologies [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (delayed binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (default fs)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (block volmode)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with same fsgroup applied to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV 
(default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with different fsgroup applied to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with same fsgroup skips ownership changes to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with different fsgroup applied to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should create read-only inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should create read/write inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": 
"blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support two pods which have the same volume definition [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support multiple inline ephemeral volumes [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should create read-only inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should create read/write inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support two pods which have 
the same volume definition [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support multiple inline ephemeral volumes [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should create read-only inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should create read/write inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support two pods which have the same volume definition [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support multiple inline ephemeral volumes [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, 
+ "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (default fs)] capacity provides storage capacity information [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Inline-volume (default fs)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Inline-volume (default fs)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (default fs)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (default fs)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + 
}, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Inline-volume (ext3)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Inline-volume (ext3)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (ext3)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (ext3)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ext3)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ext3)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Inline-volume (ext4)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, 
+ "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Inline-volume (ext4)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (ext4)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (ext4)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ext4)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ext4)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Inline-volume (xfs)] [Slow] volumes should store data [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Inline-volume (xfs)] [Slow] volumes should allow exec of files on the volume [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, 
+ { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (xfs)] [Slow] volumes should store data [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (xfs)] [Slow] volumes should allow exec of files on the volume [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (xfs)] [Slow] volumes should store data [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (xfs)] [Slow] volumes should allow exec of files on the volume [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Inline-volume (ntfs)] [Feature:Windows] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Inline-volume (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (ntfs)] [Feature:Windows] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, 
+ { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (block volmode)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (block volmode)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Inline-volume (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + 
"conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumeIO should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should 
fail to create pod by failing to mount volume [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (block volmode)] volumeMode should fail in binding dynamic provisioned PV to PVC [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Inline-volume (default fs)] subPath should support non-existent path [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": 
"[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Inline-volume (default fs)] subPath should support existing directory [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Inline-volume (default fs)] subPath should support existing single file [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Inline-volume (default fs)] subPath should support file as subpath [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Inline-volume (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": 
"", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Inline-volume (default fs)] subPath should support creating multiple subpath from same volumes [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Inline-volume (default fs)] subPath should support restarting containers using directory as subpath [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Inline-volume (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Inline-volume (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Inline-volume (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Inline-volume (default fs)] subPath should support readOnly directory specified in the volumeMount [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Inline-volume (default fs)] subPath should support readOnly file specified in the 
volumeMount [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Inline-volume (default fs)] subPath should support existing directories when readOnly specified in the volumeSource [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Inline-volume (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Inline-volume (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] subPath should support non-existent path [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing directory [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing single file [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { 
+ "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] subPath should support file as subpath [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] subPath should support creating multiple subpath from same volumes [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] subPath should support restarting containers using directory as subpath [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + 
"conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] subPath should support readOnly directory specified in the volumeMount [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing directories when readOnly specified in the volumeSource [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes 
[Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (default fs)] subPath should support non-existent path [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (default fs)] subPath should support existing directory [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (default fs)] subPath should support existing single file [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (default fs)] subPath should support file as subpath [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + 
"source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (default fs)] subPath should support creating multiple subpath from same volumes [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (default fs)] subPath should support restarting containers using directory as subpath [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Slow": {}, + 
"sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (default fs)] subPath should support readOnly directory specified in the volumeMount [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (default fs)] subPath should support existing directories when readOnly specified in the volumeSource [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree 
Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support non-existent path [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing directory [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing single file [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support file as subpath [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + 
"labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support creating multiple subpath from same volumes [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support restarting containers using directory as subpath [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, 
+ "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support readOnly directory specified in the volumeMount [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support readOnly file specified in the volumeMount [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing directories when readOnly specified in the volumeSource [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with mount options [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": 
"openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with any volume data source [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (default fs)] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source (ROX mode) [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + 
"timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source in parallel [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (default fs)] provisioning should mount multiple PV pointing to the same storage on the same node [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with mount options [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with any volume data source [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + 
"name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (block volmode)] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source (ROX mode) [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source in parallel [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (block volmode)] provisioning should mount multiple PV pointing to the same storage on the same node [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with mount options [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision 
storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with any volume data source [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source (ROX mode) [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + 
"source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source in parallel [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should mount multiple PV pointing to the same storage on the same node [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": 
"blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + 
"source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + 
"conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": 
"blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": 
"openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + 
"duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] 
[Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should 
access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (xfs)] 
[Slow] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should 
concurrently access the single volume from pods on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSourceXFS": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (default fs)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes 
[Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (block volmode)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand Verify if offline PVC expansion works [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand Verify if offline PVC expansion works [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ntfs)(allowExpansion)] [Feature:Windows] volume-expand Verify if offline PVC expansion works [Suite:openshift/conformance/parallel] 
[Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (ntfs)(allowExpansion)] [Feature:Windows] volume-expand should resize volume when PVC is edited while pod is using it [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart. [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + 
"source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart. [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] 
[LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart. [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart. [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns. 
[Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (filesystem volmode)] volumeLimits should support volume limits [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (filesystem volmode)] volumeLimits should verify that all csinodes have volume limits [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Generic Ephemeral-volume (default fs)] volumeLimits should support volume limits [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Generic Ephemeral-volume (default fs)] volumeLimits should verify that all csinodes have volume limits [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (immediate binding)] topology should provision a volume and schedule a pod with AllowedTopologies [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null 
+ }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (immediate binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (delayed binding)] topology should provision a volume and schedule a pod with AllowedTopologies [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (delayed binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (default fs)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (block volmode)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: 
local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with same fsgroup applied to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with different fsgroup applied to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with same fsgroup skips ownership changes to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with different fsgroup applied to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should create read-only inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, 
+ "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should create read/write inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support two pods which have the same volume definition [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support multiple inline ephemeral volumes [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should create read-only inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should create read/write inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + 
"lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support two pods which have the same volume definition [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support multiple inline ephemeral volumes [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should create read-only inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should create read/write inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Generic Ephemeral-volume (block 
volmode) (late-binding)] ephemeral should support two pods which have the same volume definition [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-bindmounted] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support multiple inline ephemeral volumes [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (default fs)] capacity provides storage capacity information [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Inline-volume (default fs)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Inline-volume (default fs)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" 
+ }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (default fs)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (default fs)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Inline-volume (ext3)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Inline-volume (ext3)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (ext3)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (ext3)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ext3)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + 
"name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ext3)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Inline-volume (ext4)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Inline-volume (ext4)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (ext4)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (ext4)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ext4)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ext4)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + 
"name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Inline-volume (xfs)] [Slow] volumes should store data [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Inline-volume (xfs)] [Slow] volumes should allow exec of files on the volume [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (xfs)] [Slow] volumes should store data [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (xfs)] [Slow] volumes should allow exec of files on the volume [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (xfs)] [Slow] volumes should store data [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (xfs)] [Slow] volumes should allow exec of files on the volume [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Inline-volume (ntfs)] [Feature:Windows] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] 
[Testpattern: Inline-volume (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (ntfs)] [Feature:Windows] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (block volmode)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (block volmode)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + 
"duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Inline-volume (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumeIO should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should fail to use a 
volume in a pod with mismatched mode [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should fail to create pod by failing to mount volume [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (block volmode)] volumeMode should fail in binding dynamic provisioned PV to PVC [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": 
"blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Inline-volume (default fs)] subPath should support non-existent path [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Inline-volume (default fs)] subPath should support existing directory [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Inline-volume (default fs)] subPath should support existing single file [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Inline-volume (default fs)] subPath should support file as subpath [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + 
"conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Inline-volume (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Inline-volume (default fs)] subPath should support creating multiple subpath from same volumes [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Inline-volume (default fs)] subPath should support restarting containers using directory as subpath [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Inline-volume (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Inline-volume (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Inline-volume (default fs)] subPath should 
unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Inline-volume (default fs)] subPath should support readOnly directory specified in the volumeMount [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Inline-volume (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Inline-volume (default fs)] subPath should support existing directories when readOnly specified in the volumeSource [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Inline-volume (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Inline-volume (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] subPath should support non-existent path [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null 
+ }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing directory [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing single file [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] subPath should support file as subpath [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] 
[LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] subPath should support creating multiple subpath from same volumes [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] subPath should support restarting containers using directory as subpath [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] subPath should support readOnly directory specified in the volumeMount [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + 
"timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing directories when readOnly specified in the volumeSource [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (default fs)] subPath should support non-existent path [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (default fs)] subPath should support existing directory [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (default fs)] subPath should support existing single file [LinuxOnly] 
[Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (default fs)] subPath should support file as subpath [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (default fs)] subPath should support creating multiple subpath from same volumes [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] 
[LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (default fs)] subPath should support restarting containers using directory as subpath [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (default fs)] subPath should support readOnly directory specified in the volumeMount [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (default fs)] subPath should support existing directories when readOnly specified in the volumeSource [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + 
"labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support non-existent path [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing directory [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing single file [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support file as subpath [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + 
}, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support creating multiple subpath from same volumes [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support restarting containers using directory as subpath [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should 
support restarting containers using file as subpath [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support readOnly directory specified in the volumeMount [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support readOnly file specified in the volumeMount [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing directories when readOnly specified in the volumeSource [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath 
should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with mount options [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with any volume data source [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (default fs)] provisioning should provision 
correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source (ROX mode) [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source in parallel [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (default fs)] provisioning should mount multiple PV pointing to the same storage on the same node [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with mount options [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + 
"Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with any volume data source [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (block volmode)] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source (ROX mode) [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source in parallel [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + 
"sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (block volmode)] provisioning should mount multiple PV pointing to the same storage on the same node [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with mount options [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with any volume data source [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision correct filesystem size when 
restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source (ROX mode) [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source in parallel [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should mount multiple PV pointing to the same storage on the same node [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (filesystem volmode)] 
multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, 
+ { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] 
[Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: 
local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + 
"source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: 
local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: 
local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] 
In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single volume 
from pods on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] 
should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSourceXFS": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + 
"duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (default fs)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (block volmode)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand Verify if offline PVC expansion works [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] 
[LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand Verify if offline PVC expansion works [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ntfs)(allowExpansion)] [Feature:Windows] volume-expand Verify if offline PVC expansion works [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (ntfs)(allowExpansion)] [Feature:Windows] volume-expand should resize volume when PVC is edited while pod is using it [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart. 
[Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart. 
[Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart. 
[Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart. [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns. 
[Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (filesystem volmode)] volumeLimits should support volume limits [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (filesystem volmode)] volumeLimits should verify that all csinodes have volume limits [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Generic Ephemeral-volume (default fs)] volumeLimits should support volume limits [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Generic Ephemeral-volume (default fs)] volumeLimits should verify that all csinodes have volume limits [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (immediate binding)] topology should provision a volume and schedule a pod with AllowedTopologies [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (immediate binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + 
"duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (delayed binding)] topology should provision a volume and schedule a pod with AllowedTopologies [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (delayed binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (default fs)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (block volmode)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with same fsgroup applied to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": 
"openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with different fsgroup applied to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with same fsgroup skips ownership changes to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with different fsgroup applied to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should create read-only inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should create read/write inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": 
null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support two pods which have the same volume definition [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support multiple inline ephemeral volumes [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should create read-only inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should create read/write inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + 
"duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support two pods which have the same volume definition [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support multiple inline ephemeral volumes [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should create read-only inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should create read/write inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support two pods which have the same volume definition [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] 
In-tree Volumes [Driver: local] [LocalVolumeType: dir-link-bindmounted] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support multiple inline ephemeral volumes [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)] capacity provides storage capacity information [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Inline-volume (default fs)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Inline-volume (default fs)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (default fs)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (default fs)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree 
Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Inline-volume (ext3)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Inline-volume (ext3)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (ext3)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (ext3)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ext3)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ext3)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Inline-volume (ext4)] volumes should 
store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Inline-volume (ext4)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (ext4)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (ext4)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ext4)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ext4)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Inline-volume (xfs)] [Slow] volumes should store data [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Inline-volume (xfs)] [Slow] volumes should allow exec of files on the volume [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + 
"resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (xfs)] [Slow] volumes should store data [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (xfs)] [Slow] volumes should allow exec of files on the volume [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (xfs)] [Slow] volumes should store data [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (xfs)] [Slow] volumes should allow exec of files on the volume [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Inline-volume (ntfs)] [Feature:Windows] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Inline-volume (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (ntfs)] [Feature:Windows] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, 
+ "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (block volmode)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (block volmode)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Inline-volume (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": 
null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumeIO should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should fail to create 
pod by failing to mount volume [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (block volmode)] volumeMode should fail in binding dynamic provisioned PV to PVC [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Inline-volume (default fs)] subPath should support non-existent path [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: 
Inline-volume (default fs)] subPath should support existing directory [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Inline-volume (default fs)] subPath should support existing single file [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Inline-volume (default fs)] subPath should support file as subpath [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Inline-volume (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: 
local] [LocalVolumeType: tmpfs] [Testpattern: Inline-volume (default fs)] subPath should support creating multiple subpath from same volumes [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Inline-volume (default fs)] subPath should support restarting containers using directory as subpath [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Inline-volume (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Inline-volume (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Inline-volume (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Inline-volume (default fs)] subPath should support readOnly directory specified in the volumeMount [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Inline-volume (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + 
}, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Inline-volume (default fs)] subPath should support existing directories when readOnly specified in the volumeSource [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Inline-volume (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Inline-volume (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should support non-existent path [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing directory [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing single file [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should support file as subpath [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + 
"sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should support creating multiple subpath from same volumes [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should support restarting containers using directory as subpath [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should support restarting containers using file as 
subpath [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should support readOnly directory specified in the volumeMount [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing directories when readOnly specified in the volumeSource [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + 
"source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)] subPath should support non-existent path [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)] subPath should support existing directory [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)] subPath should support existing single file [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)] subPath should support file as subpath [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + 
"duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)] subPath should support creating multiple subpath from same volumes [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)] subPath should support restarting containers using directory as subpath [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + 
"Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)] subPath should support readOnly directory specified in the volumeMount [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)] subPath should support existing directories when readOnly specified in the volumeSource [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support non-existent path [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic 
PV (ntfs)] [Feature:Windows] subPath should support existing directory [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing single file [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support file as subpath [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + 
"isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support creating multiple subpath from same volumes [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support restarting containers using directory as subpath [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support readOnly directory specified in the volumeMount [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": 
"openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support readOnly file specified in the volumeMount [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing directories when readOnly specified in the volumeSource [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with mount options [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] 
[LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with any volume data source [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source (ROX mode) [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source in parallel [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)] provisioning should mount multiple PV pointing to the same storage on the same node 
[Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with mount options [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with any volume data source [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (block volmode)] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source 
[Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source (ROX mode) [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source in parallel [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (block volmode)] provisioning should mount multiple PV pointing to the same storage on the same node [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with mount options [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + 
"Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with any volume data source [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source (ROX mode) [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source in parallel [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should mount multiple PV pointing to the same storage on the same node [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + 
"sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + 
"Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + 
"conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + 
"duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + 
"duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] 
[LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] 
should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] 
[Feature:VolumeSourceXFS] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": 
"", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": 
"openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSourceXFS": {}, + "Feature:Windows": {}, + 
"Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (block volmode)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand Verify if offline PVC expansion works [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + 
"name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand Verify if offline PVC expansion works [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ntfs)(allowExpansion)] [Feature:Windows] volume-expand Verify if offline PVC expansion works [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (ntfs)(allowExpansion)] [Feature:Windows] volume-expand should resize volume when PVC is edited while pod is using it [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart. 
[Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart. 
[Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart. 
[Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart. [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns. 
[Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (filesystem volmode)] volumeLimits should support volume limits [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (filesystem volmode)] volumeLimits should verify that all csinodes have volume limits [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Generic Ephemeral-volume (default fs)] volumeLimits should support volume limits [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Generic Ephemeral-volume (default fs)] volumeLimits should verify that all csinodes have volume limits [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (immediate binding)] topology should provision a volume and schedule a pod with AllowedTopologies [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (immediate binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": 
"blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (delayed binding)] topology should provision a volume and schedule a pod with AllowedTopologies [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (delayed binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (block volmode)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with same fsgroup applied to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy 
(Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with different fsgroup applied to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with same fsgroup skips ownership changes to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with different fsgroup applied to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should create read-only inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should create read/write inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree 
Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support two pods which have the same volume definition [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support multiple inline ephemeral volumes [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should create read-only inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should create read/write inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support two pods which have the same volume definition [Suite:openshift/conformance/parallel] 
[Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support multiple inline ephemeral volumes [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should create read-only inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should create read/write inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support two pods which have the same volume definition [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: tmpfs] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support multiple inline ephemeral volumes [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": 
"openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (default fs)] capacity provides storage capacity information [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Inline-volume (default fs)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Inline-volume (default fs)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (default fs)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (default fs)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (default fs)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (default fs)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + 
"name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Inline-volume (ext3)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Inline-volume (ext3)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (ext3)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (ext3)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ext3)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ext3)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Inline-volume (ext4)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Inline-volume (ext4)] volumes 
should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (ext4)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (ext4)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ext4)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ext4)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Inline-volume (xfs)] [Slow] volumes should store data [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Inline-volume (xfs)] [Slow] volumes should allow exec of files on the volume [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (xfs)] [Slow] volumes should store data [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + 
"resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (xfs)] [Slow] volumes should allow exec of files on the volume [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (xfs)] [Slow] volumes should store data [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (xfs)] [Slow] volumes should allow exec of files on the volume [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Inline-volume (ntfs)] [Feature:Windows] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Inline-volume (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (ntfs)] [Feature:Windows] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + 
"tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (block volmode)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (block volmode)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Inline-volume (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + 
"Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumeIO should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned 
PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Inline-volume (default fs)] subPath should support non-existent path [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Inline-volume (default fs)] subPath should support existing directory [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Inline-volume (default fs)] subPath should support existing single file [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Inline-volume (default fs)] subPath should support file as subpath [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] 
[LocalVolumeType: block] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Inline-volume (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Inline-volume (default fs)] subPath should support creating multiple subpath from same volumes [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Inline-volume (default fs)] subPath should support restarting containers using directory as subpath [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Inline-volume (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { 
+ "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Inline-volume (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Inline-volume (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Inline-volume (default fs)] subPath should support readOnly directory specified in the volumeMount [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Inline-volume (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Inline-volume (default fs)] subPath should support existing directories when readOnly specified in the volumeSource [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Inline-volume (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Inline-volume (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + 
"labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (default fs)] subPath should support non-existent path [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing directory [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing single file [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (default fs)] subPath should support file as subpath [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly] 
[Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (default fs)] subPath should support creating multiple subpath from same volumes [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (default fs)] subPath should support restarting containers using directory as subpath [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": 
"[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (default fs)] subPath should support readOnly directory specified in the volumeMount [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing directories when readOnly specified in the volumeSource [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (default fs)] subPath should support non-existent path [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (default fs)] subPath should support existing directory [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + 
"memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (default fs)] subPath should support existing single file [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (default fs)] subPath should support file as subpath [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (default fs)] subPath should support creating multiple subpath from same volumes [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": 
"", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (default fs)] subPath should support restarting containers using directory as subpath [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (default fs)] subPath should support readOnly directory specified in the volumeMount [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (default fs)] subPath should support existing directories when readOnly 
specified in the volumeSource [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support non-existent path [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing directory [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing single file [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support file as subpath [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + 
"lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support creating multiple subpath from same volumes [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support restarting containers using directory as subpath [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly] 
[Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support readOnly directory specified in the volumeMount [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support readOnly file specified in the volumeMount [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing directories when readOnly specified in the volumeSource [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, 
+ "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with mount options [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with any volume data source [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (default fs)] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + 
"Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source (ROX mode) [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source in parallel [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (default fs)] provisioning should mount multiple PV pointing to the same storage on the same node [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with mount options [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] 
In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with any volume data source [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (block volmode)] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source (ROX mode) [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source in parallel [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (block volmode)] provisioning should mount multiple PV 
pointing to the same storage on the same node [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with mount options [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with any volume data source [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, 
+ { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source (ROX mode) [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source in parallel [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should mount multiple PV pointing to the same storage on the same node [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] 
should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV 
(filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] 
[Skipped:NoOptionalCapabilities] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + 
"Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} 
+ }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": 
{ + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: 
local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with the same 
volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] 
[Feature:VolumeSourceXFS] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": 
{}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSourceXFS": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (default fs)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property 
[Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (block volmode)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand Verify if offline PVC expansion works [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand Verify if offline PVC expansion works [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": 
"openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ntfs)(allowExpansion)] [Feature:Windows] volume-expand Verify if offline PVC expansion works [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (ntfs)(allowExpansion)] [Feature:Windows] volume-expand should resize volume when PVC is edited while pod is using it [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart. [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + 
"otherNames": null, + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart. [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + 
"conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart. [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart. 
[Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (filesystem volmode)] volumeLimits should support volume limits [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (filesystem volmode)] volumeLimits should verify that all csinodes have volume limits [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Generic Ephemeral-volume (default fs)] volumeLimits should support volume limits [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Generic Ephemeral-volume (default fs)] volumeLimits should verify that all csinodes have volume limits [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + 
"conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (immediate binding)] topology should provision a volume and schedule a pod with AllowedTopologies [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (immediate binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (delayed binding)] topology should provision a volume and schedule a pod with AllowedTopologies [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (delayed binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (default fs)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (block volmode)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (default fs)] 
fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with same fsgroup applied to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with different fsgroup applied to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with same fsgroup skips ownership changes to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with different fsgroup applied to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + 
"memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should create read-only inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should create read/write inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support two pods which have the same volume definition [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support multiple inline ephemeral volumes [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should create read-only inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Generic Ephemeral-volume (default 
fs) (immediate-binding)] ephemeral should create read/write inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support two pods which have the same volume definition [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support multiple inline ephemeral volumes [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should create read-only inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should create read/write inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + 
"isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support two pods which have the same volume definition [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: block] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support multiple inline ephemeral volumes [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (default fs)] capacity provides storage capacity information [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Inline-volume (default fs)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Inline-volume (default fs)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (default fs)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (default fs)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": 
{ + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (default fs)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (default fs)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Inline-volume (ext3)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Inline-volume (ext3)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (ext3)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (ext3)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ext3)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": 
null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ext3)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Inline-volume (ext4)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Inline-volume (ext4)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (ext4)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (ext4)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ext4)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ext4)] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": 
"openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Inline-volume (xfs)] [Slow] volumes should store data [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Inline-volume (xfs)] [Slow] volumes should allow exec of files on the volume [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (xfs)] [Slow] volumes should store data [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (xfs)] [Slow] volumes should allow exec of files on the volume [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (xfs)] [Slow] volumes should store data [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (xfs)] [Slow] volumes should allow exec of files on the volume [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Inline-volume (ntfs)] [Feature:Windows] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Inline-volume (ntfs)] 
[Feature:Windows] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (ntfs)] [Feature:Windows] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (block volmode)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (block volmode)] volumes should store data [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { 
+ "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Inline-volume (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumeIO should write files of various sizes, verify size, validate content [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": 
null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should fail to create pod by failing to mount volume [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (block volmode)] volumeMode should fail in binding dynamic provisioned PV to PVC [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, 
+ "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Inline-volume (default fs)] subPath should support non-existent path [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Inline-volume (default fs)] subPath should support existing directory [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Inline-volume (default fs)] subPath should support existing single file [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Inline-volume (default fs)] subPath should support file as subpath [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Inline-volume (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + 
"otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Inline-volume (default fs)] subPath should support creating multiple subpath from same volumes [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Inline-volume (default fs)] subPath should support restarting containers using directory as subpath [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Inline-volume (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Inline-volume (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Inline-volume (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: 
local] [LocalVolumeType: blockfs] [Testpattern: Inline-volume (default fs)] subPath should support readOnly directory specified in the volumeMount [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Inline-volume (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Inline-volume (default fs)] subPath should support existing directories when readOnly specified in the volumeSource [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Inline-volume (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Inline-volume (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should support non-existent path [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing directory [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + 
"timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing single file [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should support file as subpath [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should support creating multiple subpath from same volumes [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + 
"resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should support restarting containers using directory as subpath [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should support readOnly directory specified in the volumeMount [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: 
blockfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing directories when readOnly specified in the volumeSource [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (default fs)] subPath should support non-existent path [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (default fs)] subPath should support existing directory [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (default fs)] subPath should support existing single file [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (default fs)] subPath should support file as subpath [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": 
"blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (default fs)] subPath should support creating multiple subpath from same volumes [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (default fs)] subPath should support restarting containers using directory as subpath [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + 
"source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (default fs)] subPath should support readOnly directory specified in the volumeMount [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (default fs)] subPath should support existing directories when readOnly specified in the volumeSource [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] 
[Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support non-existent path [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing directory [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing single file [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support file as subpath [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": 
"openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support creating multiple subpath from same volumes [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support restarting containers using directory as subpath [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ntfs)] 
[Feature:Windows] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support readOnly directory specified in the volumeMount [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support readOnly file specified in the volumeMount [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing directories when readOnly specified in the volumeSource [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should verify container cannot write to subpath readonly volumes [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with mount options [Suite:openshift/conformance/parallel] 
[Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with any volume data source [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (default fs)] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source (ROX mode) [Suite:openshift/conformance/parallel] [Suite:k8s]", + 
"otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source in parallel [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (default fs)] provisioning should mount multiple PV pointing to the same storage on the same node [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with mount options [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with any volume data source [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + 
"memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (block volmode)] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source (ROX mode) [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source in parallel [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (block volmode)] provisioning should mount multiple PV pointing to the same storage on the same node [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with mount options [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] 
[Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with any volume data source [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source (ROX mode) [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + 
"duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source in parallel [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should mount multiple PV pointing to the same storage on the same node [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, 
+ { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, 
+ { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + 
"lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume 
[Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes 
with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access 
the single read-only volume from pods on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": 
"" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": 
"blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSourceXFS": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] 
In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] 
[Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSourceXFS": {}, + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single volume from pods on different node [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (default fs)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (block volmode)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": 
"[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand Verify if offline PVC expansion works [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand Verify if offline PVC expansion works [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ntfs)(allowExpansion)] [Feature:Windows] volume-expand Verify if offline PVC expansion works [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (ntfs)(allowExpansion)] [Feature:Windows] volume-expand should resize volume when PVC is edited while pod is using it 
[Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart. [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": 
null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart. [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": 
"blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart. [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart. [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns. [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns. 
[Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (filesystem volmode)] volumeLimits should support volume limits [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (filesystem volmode)] volumeLimits should verify that all csinodes have volume limits [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Generic Ephemeral-volume (default fs)] volumeLimits should support volume limits [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Generic Ephemeral-volume (default fs)] volumeLimits should verify that all csinodes have volume limits [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (immediate binding)] topology should provision a volume and schedule a pod with AllowedTopologies [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (immediate binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + 
"lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (delayed binding)] topology should provision a volume and schedule a pod with AllowedTopologies [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (delayed binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (default fs)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (block volmode)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with same fsgroup applied to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV 
(default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with different fsgroup applied to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with same fsgroup skips ownership changes to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with different fsgroup applied to the volume contents [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should create read-only inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should create read/write inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" 
+ }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support two pods which have the same volume definition [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support multiple inline ephemeral volumes [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should create read-only inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should create read/write inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support two pods which have the same volume 
definition [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support multiple inline ephemeral volumes [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should create read-only inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should create read/write inline ephemeral volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support two pods which have the same volume definition [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: blockfs] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support multiple inline ephemeral volumes [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + 
"duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)] capacity provides storage capacity information [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Inline-volume (default fs)] volumes should store data [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Inline-volume (default fs)] volumes should allow exec of files on the volume [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (default fs)] volumes should store data [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (default fs)] volumes should allow exec of files on the volume [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)] volumes should store data [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)] 
volumes should allow exec of files on the volume [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Inline-volume (ext3)] volumes should store data [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Inline-volume (ext3)] volumes should allow exec of files on the volume [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (ext3)] volumes should store data [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (ext3)] volumes should allow exec of files on the volume [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ext3)] volumes should store data [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ext3)] volumes should allow exec of files on the volume [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", 
+ "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Inline-volume (ext4)] volumes should store data [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Inline-volume (ext4)] volumes should allow exec of files on the volume [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (ext4)] volumes should store data [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (ext4)] volumes should allow exec of files on the volume [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ext4)] volumes should store data [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ext4)] volumes should allow exec of files on the volume [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Inline-volume (xfs)] [Slow] volumes should store data 
[Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Inline-volume (xfs)] [Slow] volumes should allow exec of files on the volume [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (xfs)] [Slow] volumes should store data [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (xfs)] [Slow] volumes should allow exec of files on the volume [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (xfs)] [Slow] volumes should store data [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (xfs)] [Slow] volumes should allow exec of files on the volume [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Inline-volume (ntfs)] [Feature:Windows] volumes should store data [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + 
{ + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Inline-volume (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (ntfs)] [Feature:Windows] volumes should store data [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumes should store data [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumes should allow exec of files on the volume [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (block volmode)] volumes should store data [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] 
[LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (block volmode)] volumes should store data [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Inline-volume (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow] [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow] [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow] [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volumeIO should write files of various sizes, verify size, validate content [Slow] [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow] [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode 
should not mount / map unused volumes in a pod [LinuxOnly] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow] [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should fail to create pod by failing to mount volume [Slow] [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow] [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (block volmode)] volumeMode should fail in binding dynamic provisioned PV to PVC [Slow] [LinuxOnly] [Skipped:gce] 
[Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (block volmode)] volumeMode should fail to use a volume in a pod with mismatched mode [Slow] [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (block volmode)] volumeMode should not mount / map unused volumes in a pod [LinuxOnly] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Inline-volume (default fs)] subPath should support non-existent path [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Inline-volume (default fs)] subPath should support existing directory [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Inline-volume (default fs)] subPath should support existing single file [LinuxOnly] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Inline-volume (default fs)] subPath should support file as subpath [LinuxOnly] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + 
"conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly] [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly] [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Inline-volume (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly] [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly] [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Inline-volume (default fs)] subPath should support creating multiple subpath from same volumes [Slow] [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Inline-volume (default fs)] subPath should support restarting containers using directory as subpath [Slow] [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": 
"blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Inline-volume (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly] [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Inline-volume (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Inline-volume (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Inline-volume (default fs)] subPath should support readOnly directory specified in the volumeMount [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Inline-volume (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Inline-volume (default fs)] subPath should support existing directories when readOnly specified in the volumeSource [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + 
"lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Inline-volume (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow] [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Inline-volume (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should support non-existent path [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing directory [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing single file [LinuxOnly] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should support file as subpath [LinuxOnly] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] 
[Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly] [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly] [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly] [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly] [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should support creating multiple subpath from same volumes [Slow] [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should support restarting containers using directory as subpath [Slow] [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should support restarting containers using 
file as subpath [Slow] [LinuxOnly] [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should support readOnly directory specified in the volumeMount [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing directories when readOnly specified in the volumeSource [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath 
should verify container cannot write to subpath readonly volumes [Slow] [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)] subPath should support non-existent path [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)] subPath should support existing directory [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)] subPath should support existing single file [LinuxOnly] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)] subPath should support file as subpath [LinuxOnly] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly] [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "Slow": 
{}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly] [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly] [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly] [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)] subPath should support creating multiple subpath from same volumes [Slow] [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)] subPath should support restarting containers using directory as subpath [Slow] [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly] [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + 
"timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)] subPath should support readOnly directory specified in the volumeMount [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)] subPath should support readOnly file specified in the volumeMount [LinuxOnly] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)] subPath should support existing directories when readOnly specified in the volumeSource [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)] subPath should verify container cannot write to subpath readonly volumes [Slow] [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": 
"" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support non-existent path [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing directory [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing single file [LinuxOnly] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support file as subpath [LinuxOnly] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath directory is outside the volume [Slow] [LinuxOnly] [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", 
+ "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath file is outside the volume [Slow] [LinuxOnly] [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if non-existent subpath is outside the volume [Slow] [LinuxOnly] [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should fail if subpath with backstepping is outside the volume [Slow] [LinuxOnly] [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support creating multiple subpath from same volumes [Slow] [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support restarting containers using directory as subpath [Slow] [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support restarting containers using file as subpath [Slow] [LinuxOnly] [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + 
"mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should unmount if pod is gracefully deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Feature:Windows": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should unmount if pod is force deleted while kubelet is down [Disruptive] [Slow] [LinuxOnly] [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Feature:Windows": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support readOnly directory specified in the volumeMount [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support readOnly file specified in the volumeMount [LinuxOnly] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should support existing directories when readOnly specified in the volumeSource [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should verify container cannot write to subpath readonly 
volumes [Slow] [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] subPath should be able to unmount after the subpath directory is deleted [LinuxOnly] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with mount options [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with any volume data source [Serial] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": 
"[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source (ROX mode) [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with pvc data source in parallel [Slow] [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)] provisioning should mount multiple PV pointing to the same storage on the same node [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with mount options [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": 
"blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with any volume data source [Serial] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (block volmode)] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source (ROX mode) [Skipped:gce] [Suite:openshift/conformance/serial] 
[Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (block volmode)] provisioning should provision storage with pvc data source in parallel [Slow] [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (block volmode)] provisioning should mount multiple PV pointing to the same storage on the same node [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with mount options [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:Windows": {}, + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:Windows": {}, + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] 
In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with any volume data source [Serial] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource] [Skipped:NoOptionalCapabilities] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:Windows": {}, + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source (ROX mode) [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should provision storage with pvc data source in parallel [Slow] [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] provisioning should mount multiple PV pointing to the same storage on the same node [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Serial": {}, + 
"sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should 
concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSourceXFS": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", 
+ "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSourceXFS": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: 
Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" 
+ }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSourceXFS": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Skipped:gce] [Suite:k8s]", + "otherNames": 
null, + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: 
local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSourceXFS": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (block volmode)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": 
"openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSourceXFS": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ext4)] multiVolume [Slow] should concurrently access the single volume from pods on different node [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + 
"Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node 
[LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSourceXFS": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (xfs)] [Slow] multiVolume [Slow] should concurrently access the single volume from pods on different node [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on the same node [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with the same volume mode and retain data across pod recreation on different node [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": 
"" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on the same node [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should access to two volumes with different volume mode and retain data across pod recreation on different node [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single volume from pods on the same node [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly] [Feature:VolumeSnapshotDataSource] [Feature:VolumeSourceXFS] [Skipped:NoOptionalCapabilities] [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "Feature:VolumeSourceXFS": {}, + "Feature:Windows": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the volume and its clone from pods on the same node [LinuxOnly] [Feature:VolumeSourceXFS] [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSourceXFS": {}, + "Feature:Windows": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: 
local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single read-only volume from pods on the same node [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] multiVolume [Slow] should concurrently access the single volume from pods on different node [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (block volmode)] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand Verify if offline PVC expansion works [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + 
{ + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand Verify if offline PVC expansion works [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (block volmode)(allowExpansion)] volume-expand should resize volume when PVC is edited while pod is using it [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ntfs)] [Feature:Windows] volume-expand should not allow expansion of pvcs without AllowVolumeExpansion property [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ntfs)(allowExpansion)] [Feature:Windows] volume-expand Verify if offline PVC expansion works [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (ntfs)(allowExpansion)] [Feature:Windows] volume-expand should resize volume when PVC is edited while pod is using it [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart. 
[Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux] [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux] [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux] [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux] [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv 
written before kubelet restart is readable after restart. [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux] [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux] [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux] [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux] [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Feature:SELinux": {}, + "LinuxOnly": {}, + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] 
[LinuxOnly] Should test that pv written before kubelet restart is readable after restart. [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns. [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Pre-provisioned PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns. [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv written before kubelet restart is readable after restart. [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns. [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (block volmode)] disruptive [Disruptive] [LinuxOnly] Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns. 
[Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "LinuxOnly": {}, + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] volumeLimits should support volume limits [Serial] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (filesystem volmode)] volumeLimits should verify that all csinodes have volume limits [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Generic Ephemeral-volume (default fs)] volumeLimits should support volume limits [Serial] [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Generic Ephemeral-volume (default fs)] volumeLimits should verify that all csinodes have volume limits [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (immediate binding)] topology should provision a volume and schedule a pod with AllowedTopologies [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (immediate binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies [Skipped:gce] 
[Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (delayed binding)] topology should provision a volume and schedule a pod with AllowedTopologies [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (delayed binding)] topology should fail to schedule a pod which has topologies that conflict with AllowedTopologies [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial] [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (block volmode)] volume-stress multiple pods should access different volumes repeatedly [Slow] [Serial] [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume 
contents ownership changed via chgrp in first pod, new pod with same fsgroup applied to the volume contents [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (Always)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with different fsgroup applied to the volume contents [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, new pod fsgroup applied to volume contents [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with same fsgroup skips ownership changes to the volume contents [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Dynamic PV (default fs)] fsgroupchangepolicy (OnRootMismatch)[LinuxOnly], pod created with an initial fsgroup, volume contents ownership changed via chgrp in first pod, new pod with different fsgroup applied to the volume contents [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should create read-only inline ephemeral volume [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + 
"otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should create read/write inline ephemeral volume [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support two pods which have the same volume definition [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Generic Ephemeral-volume (default fs) (late-binding)] ephemeral should support multiple inline ephemeral volumes [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should create read-only inline ephemeral volume [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should create read/write inline ephemeral volume 
[Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support two pods which have the same volume definition [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Generic Ephemeral-volume (default fs) (immediate-binding)] ephemeral should support multiple inline ephemeral volumes [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should create read-only inline ephemeral volume [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should create read/write inline ephemeral volume [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Generic Ephemeral-volume (block volmode) 
(late-binding)] ephemeral should support expansion of pvcs created for ephemeral pvcs [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support two pods which have the same volume definition [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] In-tree Volumes [Driver: local] [LocalVolumeType: gce-localssd-scsi-fs] [Serial] [Testpattern: Generic Ephemeral-volume (block volmode) (late-binding)] ephemeral should support multiple inline ephemeral volumes [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] Mounted volume expand [Feature:StorageProvider] Should verify mounted devices can be resized [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:StorageProvider": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] NFSPersistentVolumes [Disruptive] [Flaky] when kube-controller-manager restarts should delete a bound PVC from a clientPod, restart the kube-control-manager, and ensure the kube-controller-manager does not crash [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Flaky": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] NFSPersistentVolumes [Disruptive] [Flaky] when kubelet restarts Should test that a file written to the mount before kubelet restart is readable after restart. [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Flaky": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] NFSPersistentVolumes [Disruptive] [Flaky] when kubelet restarts Should test that a volume mounted to a pod that is deleted while the kubelet is down unmounts when the kubelet returns. 
[Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Flaky": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] NFSPersistentVolumes [Disruptive] [Flaky] when kubelet restarts Should test that a volume mounted to a pod that is force deleted while the kubelet is down unmounts when the kubelet returns. [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Flaky": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] [Feature:NodeOutOfServiceVolumeDetach] [Disruptive] [LinuxOnly] NonGracefulNodeShutdown [NonGracefulNodeShutdown] pod that uses a persistent volume via gce pd driver should get immediately rescheduled to a different node after non graceful node shutdown [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Feature:NodeOutOfServiceVolumeDetach": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes-expansion loopback local block volume should support online expansion on node [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes-local [Volume type: dir-link-bindmounted] One pod requesting one prebound PVC should be able to mount volume and read from pod1 [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes-local [Volume type: dir-link-bindmounted] One pod requesting one prebound PVC should be able to mount volume and write from pod1 [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes-local [Volume type: dir-link-bindmounted] Two pods mounting a local volume at the same time should be able to write from pod1 and read from pod2 [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": 
"[sig-storage] PersistentVolumes-local [Volume type: dir-link-bindmounted] Two pods mounting a local volume one after the other should be able to write from pod1 and read from pod2 [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes-local [Volume type: dir-link-bindmounted] Set fsGroup for local volume should set fsGroup for one pod [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes-local [Volume type: dir-link-bindmounted] Set fsGroup for local volume should set same fsGroup for two pods simultaneously [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes-local [Volume type: dir-link-bindmounted] Set fsGroup for local volume should set different fsGroup for second pod if first pod is deleted [Flaky] [Suite:k8s]", + "otherNames": null, + "labels": { + "Flaky": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes-local [Volume type: gce-localssd-scsi-fs] [Serial] One pod requesting one prebound PVC should be able to mount volume and read from pod1 [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes-local [Volume type: gce-localssd-scsi-fs] [Serial] One pod requesting one prebound PVC should be able to mount volume and write from pod1 [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes-local [Volume type: gce-localssd-scsi-fs] [Serial] Two pods mounting a local volume at the same time should be able to write from pod1 and read from pod2 [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": 
"openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes-local [Volume type: gce-localssd-scsi-fs] [Serial] Two pods mounting a local volume one after the other should be able to write from pod1 and read from pod2 [Skipped:gce] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes-local [Volume type: gce-localssd-scsi-fs] [Serial] Set fsGroup for local volume should set fsGroup for one pod [Slow] [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes-local [Volume type: gce-localssd-scsi-fs] [Serial] Set fsGroup for local volume should set same fsGroup for two pods simultaneously [Slow] [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes-local [Volume type: gce-localssd-scsi-fs] [Serial] Set fsGroup for local volume should set different fsGroup for second pod if first pod is deleted [Flaky] [Skipped:gce] [Suite:k8s]", + "otherNames": null, + "labels": { + "Flaky": {}, + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes-local [Volume type: dir-bindmounted] One pod requesting one prebound PVC should be able to mount volume and read from pod1 [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes-local [Volume type: dir-bindmounted] One pod requesting one prebound PVC should be able to mount volume and write from pod1 [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes-local [Volume type: dir-bindmounted] Two pods mounting a local volume at the same time should be able to write from pod1 and read from pod2 [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": 
"", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes-local [Volume type: dir-bindmounted] Two pods mounting a local volume one after the other should be able to write from pod1 and read from pod2 [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes-local [Volume type: dir-bindmounted] Set fsGroup for local volume should set fsGroup for one pod [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes-local [Volume type: dir-bindmounted] Set fsGroup for local volume should set same fsGroup for two pods simultaneously [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes-local [Volume type: dir-bindmounted] Set fsGroup for local volume should set different fsGroup for second pod if first pod is deleted [Flaky] [Suite:k8s]", + "otherNames": null, + "labels": { + "Flaky": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes-local [Volume type: dir-link] One pod requesting one prebound PVC should be able to mount volume and read from pod1 [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes-local [Volume type: dir-link] One pod requesting one prebound PVC should be able to mount volume and write from pod1 [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes-local [Volume type: dir-link] Two pods mounting a local volume at the same time should be able to write from pod1 and read from pod2 [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": 
"openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes-local [Volume type: dir-link] Two pods mounting a local volume one after the other should be able to write from pod1 and read from pod2 [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes-local [Volume type: dir-link] Set fsGroup for local volume should set fsGroup for one pod [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes-local [Volume type: dir-link] Set fsGroup for local volume should set same fsGroup for two pods simultaneously [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes-local [Volume type: dir-link] Set fsGroup for local volume should set different fsGroup for second pod if first pod is deleted [Flaky] [Suite:k8s]", + "otherNames": null, + "labels": { + "Flaky": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes-local [Volume type: block] One pod requesting one prebound PVC should be able to mount volume and read from pod1 [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes-local [Volume type: block] One pod requesting one prebound PVC should be able to mount volume and write from pod1 [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes-local [Volume type: block] Two pods mounting a local volume at the same time should be able to write from pod1 and read from pod2 [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes-local [Volume type: block] Two 
pods mounting a local volume one after the other should be able to write from pod1 and read from pod2 [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes-local [Volume type: block] Set fsGroup for local volume should set fsGroup for one pod [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes-local [Volume type: block] Set fsGroup for local volume should set same fsGroup for two pods simultaneously [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes-local [Volume type: block] Set fsGroup for local volume should set different fsGroup for second pod if first pod is deleted [Flaky] [Suite:k8s]", + "otherNames": null, + "labels": { + "Flaky": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes-local [Volume type: blockfswithformat] One pod requesting one prebound PVC should be able to mount volume and read from pod1 [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes-local [Volume type: blockfswithformat] One pod requesting one prebound PVC should be able to mount volume and write from pod1 [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes-local [Volume type: blockfswithformat] Two pods mounting a local volume at the same time should be able to write from pod1 and read from pod2 [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes-local [Volume type: blockfswithformat] Two pods mounting a local volume one after the other should be able to write from pod1 and read from pod2 
[Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes-local [Volume type: blockfswithformat] Set fsGroup for local volume should set fsGroup for one pod [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes-local [Volume type: blockfswithformat] Set fsGroup for local volume should set same fsGroup for two pods simultaneously [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes-local [Volume type: blockfswithformat] Set fsGroup for local volume should set different fsGroup for second pod if first pod is deleted [Flaky] [Suite:k8s]", + "otherNames": null, + "labels": { + "Flaky": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes-local [Volume type: blockfswithoutformat] One pod requesting one prebound PVC should be able to mount volume and read from pod1 [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes-local [Volume type: blockfswithoutformat] One pod requesting one prebound PVC should be able to mount volume and write from pod1 [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes-local [Volume type: blockfswithoutformat] Two pods mounting a local volume at the same time should be able to write from pod1 and read from pod2 [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes-local [Volume type: blockfswithoutformat] Two pods mounting a local volume one after the other should be able to write from pod1 and read from pod2 [Suite:openshift/conformance/parallel] [Suite:k8s]", + 
"otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes-local [Volume type: blockfswithoutformat] Set fsGroup for local volume should set fsGroup for one pod [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes-local [Volume type: blockfswithoutformat] Set fsGroup for local volume should set same fsGroup for two pods simultaneously [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes-local [Volume type: blockfswithoutformat] Set fsGroup for local volume should set different fsGroup for second pod if first pod is deleted [Flaky] [Suite:k8s]", + "otherNames": null, + "labels": { + "Flaky": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes-local [Volume type: tmpfs] One pod requesting one prebound PVC should be able to mount volume and read from pod1 [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes-local [Volume type: tmpfs] One pod requesting one prebound PVC should be able to mount volume and write from pod1 [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes-local [Volume type: tmpfs] Two pods mounting a local volume at the same time should be able to write from pod1 and read from pod2 [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes-local [Volume type: tmpfs] Two pods mounting a local volume one after the other should be able to write from pod1 and read from pod2 [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": 
{ + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes-local [Volume type: tmpfs] Set fsGroup for local volume should set fsGroup for one pod [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes-local [Volume type: tmpfs] Set fsGroup for local volume should set same fsGroup for two pods simultaneously [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes-local [Volume type: tmpfs] Set fsGroup for local volume should set different fsGroup for second pod if first pod is deleted [Flaky] [Suite:k8s]", + "otherNames": null, + "labels": { + "Flaky": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes-local [Volume type: dir] One pod requesting one prebound PVC should be able to mount volume and read from pod1 [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes-local [Volume type: dir] One pod requesting one prebound PVC should be able to mount volume and write from pod1 [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes-local [Volume type: dir] Two pods mounting a local volume at the same time should be able to write from pod1 and read from pod2 [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes-local [Volume type: dir] Two pods mounting a local volume one after the other should be able to write from pod1 and read from pod2 [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": 
"blocking" + }, + { + "name": "[sig-storage] PersistentVolumes-local [Volume type: dir] Set fsGroup for local volume should set fsGroup for one pod [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes-local [Volume type: dir] Set fsGroup for local volume should set same fsGroup for two pods simultaneously [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes-local [Volume type: dir] Set fsGroup for local volume should set different fsGroup for second pod if first pod is deleted [Flaky] [Suite:k8s]", + "otherNames": null, + "labels": { + "Flaky": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes-local Local volume that cannot be mounted [Slow] should fail due to non-existent path [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes-local Local volume that cannot be mounted [Slow] should fail due to wrong node [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes-local Pod with node different from PV's NodeAffinity should fail scheduling due to different NodeAffinity [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes-local Pod with node different from PV's NodeAffinity should fail scheduling due to different NodeSelector [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes-local StatefulSet with pod affinity [Slow] should use volumes spread across nodes when pod has anti-affinity [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": 
"", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes-local StatefulSet with pod affinity [Slow] should use volumes on one node when pod has affinity [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes-local StatefulSet with pod affinity [Slow] should use volumes spread across nodes when pod management is parallel and pod has anti-affinity [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes-local StatefulSet with pod affinity [Slow] should use volumes on one node when pod management is parallel and pod has affinity [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes-local Stress with local volumes [Serial] should be able to process many pods and reuse local volumes [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes NFS with Single PV - PVC pairs should create a non-pre-bound PV and PVC: test write access [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes NFS with Single PV - PVC pairs create a PVC and non-pre-bound PV: test write access [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes NFS with Single PV - PVC pairs create a PVC and a pre-bound PV: test write access [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes NFS with Single PV - PVC pairs create a PV 
and a pre-bound PVC: test write access [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes NFS with Single PV - PVC pairs create a PV: test phase transition timestamp is set and phase is Available [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes NFS with Single PV - PVC pairs create a PV and a pre-bound PVC: test phase transition timestamp is set [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes NFS with Single PV - PVC pairs create a PV and a pre-bound PVC: test phase transition timestamp multiple updates [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes NFS with multiple PVs and PVCs all in same ns should create 2 PVs and 4 PVCs: test write access [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes NFS with multiple PVs and PVCs all in same ns should create 3 PVs and 3 PVCs: test write access [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes NFS with multiple PVs and PVCs all in same ns should create 4 PVs and 2 PVCs: test write access [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes NFS when invoking the Recycle reclaim policy should test that a PV becomes Available and is clean after the PVC is deleted. 
[Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes CSI Conformance should run through the lifecycle of a PV and a PVC [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes CSI Conformance should apply changes to a pv/pvc status [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PersistentVolumes Default StorageClass [LinuxOnly] pods that use multiple volumes should be reschedulable [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PV Protection Verify \"immediate\" deletion of a PV that is not bound to a PVC [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PV Protection Verify that PV bound to a PVC is not removed immediately [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PVC Protection Verify \"immediate\" deletion of a PVC that is not in active use by a pod [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PVC Protection Verify that PVC in active use by a pod is not removed immediately [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] PVC Protection Verify 
that scheduling of a pod that uses PVC that is being deleted fails and the pod becomes Unschedulable [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] Persistent Volume Claim and StorageClass Retroactive StorageClass assignment [Serial] [Disruptive] should assign default SC to PVCs that have no SC set [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] StaticPods [Feature:Kind] should run after kubelet stopped with CSI volume mounted [Disruptive] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Feature:Kind": {}, + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] StorageClasses CSI Conformance should run through the lifecycle of a StorageClass [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] Subpath Atomic writer volumes should support subpaths with secret pod [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] Subpath Atomic writer volumes should support subpaths with configmap pod [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] Subpath Atomic writer volumes should support subpaths with configmap pod with mountPath of existing file [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] Subpath Atomic writer volumes should support subpaths with downward pod [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + 
"otherNames": null, + "labels": { + "Conformance": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] Subpath Atomic writer volumes should support subpaths with projected pod [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] Subpath Container restart should verify that container can restart successfully after configmaps modified [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] VolumeAttachment Conformance should run through the lifecycle of a VolumeAttachment [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] [Serial] Volume metrics PVC should create prometheus metrics for volume provisioning and attach/detach [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] [Serial] Volume metrics PVC should create prometheus metrics for volume provisioning errors [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] [Serial] Volume metrics PVC should create volume metrics with the correct FilesystemMode PVC ref [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] [Serial] Volume metrics PVC should create volume metrics with the correct BlockMode PVC ref [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": 
"openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] [Serial] Volume metrics PVC should create metrics for total time taken in volume operations in P/V Controller [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] [Serial] Volume metrics PVC should create volume metrics in Volume Manager [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] [Serial] Volume metrics PVC should create metrics for total number of volumes in A/D Controller [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] [Serial] Volume metrics Ephemeral should create prometheus metrics for volume provisioning and attach/detach [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] [Serial] Volume metrics Ephemeral should create prometheus metrics for volume provisioning errors [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] [Serial] Volume metrics Ephemeral should create volume metrics with the correct FilesystemMode PVC ref [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] [Serial] Volume metrics Ephemeral should create volume metrics with the correct BlockMode PVC ref [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] [Serial] Volume metrics Ephemeral should create metrics for total time taken in volume operations in P/V Controller [Suite:openshift/conformance/serial] [Suite:k8s]", + 
"otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] [Serial] Volume metrics Ephemeral should create volume metrics in Volume Manager [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] [Serial] Volume metrics Ephemeral should create metrics for total number of volumes in A/D Controller [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] [Serial] Volume metrics PVController should create none metrics for pvc controller before creating any PV or PVC [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] [Serial] Volume metrics PVController should create unbound pv count metrics for pvc controller after creating pv only [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] [Serial] Volume metrics PVController should create unbound pvc count metrics for pvc controller after creating pvc only [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] [Serial] Volume metrics PVController should create bound pv/pvc count metrics for pvc controller after creating both pv and pvc [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] [Serial] Volume metrics PVController should create unbound pvc count metrics for pvc controller with volume attributes class dimension after creating pvc only [Feature:VolumeAttributesClass] [FeatureGate:VolumeAttributesClass] [Beta] [Disabled:Alpha] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Beta": {}, + 
"Feature:VolumeAttributesClass": {}, + "FeatureGate:VolumeAttributesClass": {}, + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] [Serial] Volume metrics PVController should create bound pv/pvc count metrics for pvc controller with volume attributes class dimension after creating both pv and pvc [Feature:VolumeAttributesClass] [FeatureGate:VolumeAttributesClass] [Beta] [Disabled:Alpha] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Beta": {}, + "Feature:VolumeAttributesClass": {}, + "FeatureGate:VolumeAttributesClass": {}, + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] [Serial] Volume metrics PVController should create total pv count metrics for with plugin and volume mode labels after creating pv [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] Dynamic Provisioning DynamicProvisioner [Slow] [Feature:StorageProvider] should provision storage with different parameters [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:StorageProvider": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] Dynamic Provisioning DynamicProvisioner [Slow] [Feature:StorageProvider] should provision storage with non-default reclaim policy Retain [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:StorageProvider": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] Dynamic Provisioning DynamicProvisioner [Slow] [Feature:StorageProvider] should test that deleting a claim before the volume is provisioned deletes the volume. 
[Skipped:NoOptionalCapabilities] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:StorageProvider": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] Dynamic Provisioning DynamicProvisioner [Slow] [Feature:StorageProvider] deletion should be idempotent [Skipped:NoOptionalCapabilities] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:StorageProvider": {}, + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] Dynamic Provisioning DynamicProvisioner External should let an external dynamic provisioner create and delete persistent volumes [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] Dynamic Provisioning DynamicProvisioner Default should create and delete default persistent volumes [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] Dynamic Provisioning DynamicProvisioner Default should be disabled by changing the default annotation [Serial] [Disruptive] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] Dynamic Provisioning DynamicProvisioner Default should be disabled by removing the default annotation [Serial] [Disruptive] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] Dynamic Provisioning Invalid AWS KMS key should report an error and create no PV [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] VolumeAttributesClass [Feature:VolumeAttributesClass] [FeatureGate:VolumeAttributesClass] [Beta] should run through the lifecycle of a VolumeAttributesClass [Disabled:Alpha] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Beta": {}, + "Feature:VolumeAttributesClass": {}, + "FeatureGate:VolumeAttributesClass": {}, + "sig-storage": {} + }, + 
"tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] Volumes ConfigMap should be mountable [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Mock volume attach CSI attach test using mock driver should not require VolumeAttach for drivers without attachment [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Mock volume attach CSI attach test using mock driver should require VolumeAttach for drivers with attachment [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Mock volume attach CSI attach test using mock driver should require VolumeAttach for ephemermal volume and drivers with attachment [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Mock volume attach CSI attach test using mock driver should preserve attachment policy when no CSIDriver present [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Mock volume attach CSI CSIDriver deployment after pod creation using non-attachable mock driver should bringup pod after deploying CSIDriver attach=false [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Mock fsgroup as mount option Delegate FSGroup to CSI driver [LinuxOnly] should pass FSGroup to CSI driver if it is set in pod and driver supports VOLUME_MOUNT_GROUP [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": 
"blocking" + }, + { + "name": "[sig-storage] CSI Mock fsgroup as mount option Delegate FSGroup to CSI driver [LinuxOnly] should not pass FSGroup to CSI driver if it is set in pod and driver supports VOLUME_MOUNT_GROUP [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Mock volume fsgroup policies CSI FSGroupPolicy [LinuxOnly] should modify fsGroup if fsGroupPolicy=default [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Mock volume fsgroup policies CSI FSGroupPolicy [LinuxOnly] should modify fsGroup if fsGroupPolicy=File [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Mock volume fsgroup policies CSI FSGroupPolicy [LinuxOnly] should not modify fsGroup if fsGroupPolicy=None [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Mock volume fsgroup policies CSI FSGroupPolicy Update [LinuxOnly] should update fsGroup if update from None to File [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Mock volume fsgroup policies CSI FSGroupPolicy Update [LinuxOnly] should update fsGroup if update from None to default [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Mock volume fsgroup policies CSI FSGroupPolicy Update [LinuxOnly] should not update fsGroup if update from File to None [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Mock volume fsgroup policies CSI FSGroupPolicy Update [LinuxOnly] should update fsGroup if update from File to 
default [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Mock volume fsgroup policies CSI FSGroupPolicy Update [LinuxOnly] should not update fsGroup if update from detault to None [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Mock volume fsgroup policies CSI FSGroupPolicy Update [LinuxOnly] should update fsGroup if update from detault to File [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Mock honor pv reclaim policy [Feature:HonorPVReclaimPolicy] [FeatureGate:HonorPVReclaimPolicy] [Beta] CSI honor pv reclaim policy using mock driver Dynamic provisioning should honor pv delete reclaim policy [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Beta": {}, + "Feature:HonorPVReclaimPolicy": {}, + "FeatureGate:HonorPVReclaimPolicy": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Mock honor pv reclaim policy [Feature:HonorPVReclaimPolicy] [FeatureGate:HonorPVReclaimPolicy] [Beta] CSI honor pv reclaim policy using mock driver Dynamic provisioning should honor pv retain reclaim policy [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Beta": {}, + "Feature:HonorPVReclaimPolicy": {}, + "FeatureGate:HonorPVReclaimPolicy": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Mock honor pv reclaim policy [Feature:HonorPVReclaimPolicy] [FeatureGate:HonorPVReclaimPolicy] [Beta] CSI honor pv reclaim policy using mock driver Static provisioning should honor pv delete reclaim policy [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Beta": {}, + "Feature:HonorPVReclaimPolicy": {}, + "FeatureGate:HonorPVReclaimPolicy": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Mock honor pv reclaim policy [Feature:HonorPVReclaimPolicy] [FeatureGate:HonorPVReclaimPolicy] [Beta] CSI honor pv reclaim policy using mock driver Static provisioning should honor pv retain 
reclaim policy [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Beta": {}, + "Feature:HonorPVReclaimPolicy": {}, + "FeatureGate:HonorPVReclaimPolicy": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Mock volume node stage CSI NodeStage error cases [Slow] should call NodeUnstage after NodeStage success [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Mock volume node stage CSI NodeStage error cases [Slow] should retry NodeStage after NodeStage final error [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Mock volume node stage CSI NodeStage error cases [Slow] should retry NodeStage after NodeStage ephemeral error [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Mock volume node stage CSI NodeStage error cases [Slow] should call NodeUnstage after NodeStage ephemeral error [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Mock volume node stage CSI NodeStage error cases [Slow] should not call NodeUnstage after NodeStage final error [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Mock volume node stage CSI NodeUnstage error cases [Slow] should call NodeStage after NodeUnstage success [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Mock volume node stage CSI NodeUnstage error cases [Slow] two pods: should call NodeStage after previous NodeUnstage final error [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": 
"openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Mock volume node stage CSI NodeUnstage error cases [Slow] two pods: should call NodeStage after previous NodeUnstage transient error [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Mock selinux on mount SELinuxMount [LinuxOnly] [Feature:SELinux] should pass SELinux mount option for RWOP volume and Pod with SELinux context set [FeatureGate:SELinuxMountReadWriteOncePod] [Beta] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Beta": {}, + "Feature:SELinux": {}, + "FeatureGate:SELinuxMountReadWriteOncePod": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Mock selinux on mount SELinuxMount [LinuxOnly] [Feature:SELinux] should add SELinux mount option to existing mount options [FeatureGate:SELinuxMountReadWriteOncePod] [Beta] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Beta": {}, + "Feature:SELinux": {}, + "FeatureGate:SELinuxMountReadWriteOncePod": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Mock selinux on mount SELinuxMount [LinuxOnly] [Feature:SELinux] should not pass SELinux mount option for RWO volume with SELinuxMount disabled [FeatureGate:SELinuxMountReadWriteOncePod] [Beta] [Feature:SELinuxMountReadWriteOncePodOnly] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Beta": {}, + "Feature:SELinux": {}, + "Feature:SELinuxMountReadWriteOncePodOnly": {}, + "FeatureGate:SELinuxMountReadWriteOncePod": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Mock selinux on mount SELinuxMount [LinuxOnly] [Feature:SELinux] should pass SELinux mount option for RWO volume with SELinuxMount enabled [FeatureGate:SELinuxMountReadWriteOncePod] [Beta] [FeatureGate:SELinuxMount] [Alpha] [Disabled:Alpha] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Alpha": {}, + "Feature:Beta": {}, + "Feature:SELinux": {}, + "FeatureGate:SELinuxMount": {}, + "FeatureGate:SELinuxMountReadWriteOncePod": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Mock selinux on mount SELinuxMount [LinuxOnly] [Feature:SELinux] should not pass SELinux mount option for Pod without SELinux context [FeatureGate:SELinuxMountReadWriteOncePod] [Beta] [Suite:openshift/conformance/parallel] 
[Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Beta": {}, + "Feature:SELinux": {}, + "FeatureGate:SELinuxMountReadWriteOncePod": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Mock selinux on mount SELinuxMount [LinuxOnly] [Feature:SELinux] should not pass SELinux mount option for CSI driver that does not support SELinux mount [FeatureGate:SELinuxMountReadWriteOncePod] [Beta] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Beta": {}, + "Feature:SELinux": {}, + "FeatureGate:SELinuxMountReadWriteOncePod": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Mock selinux on mount SELinuxMount [LinuxOnly] [Feature:SELinux] should not unstage RWOP volume when starting a second pod with the same SELinux context [FeatureGate:SELinuxMountReadWriteOncePod] [Beta] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Beta": {}, + "Feature:SELinux": {}, + "FeatureGate:SELinuxMountReadWriteOncePod": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Mock selinux on mount SELinuxMount [LinuxOnly] [Feature:SELinux] should unstage RWOP volume when starting a second pod with different SELinux context [FeatureGate:SELinuxMountReadWriteOncePod] [Beta] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Beta": {}, + "Feature:SELinux": {}, + "FeatureGate:SELinuxMountReadWriteOncePod": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Mock selinux on mount SELinuxMount [LinuxOnly] [Feature:SELinux] should not unstage RWO volume when starting a second pod with the same SELinux context [FeatureGate:SELinuxMountReadWriteOncePod] [Beta] [FeatureGate:SELinuxMount] [Alpha] [Disabled:Alpha] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Alpha": {}, + "Feature:Beta": {}, + "Feature:SELinux": {}, + "FeatureGate:SELinuxMount": {}, + "FeatureGate:SELinuxMountReadWriteOncePod": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Mock selinux on mount SELinuxMount [LinuxOnly] [Feature:SELinux] should unstage RWO volume when starting a second pod with different SELinux context [FeatureGate:SELinuxMountReadWriteOncePod] [Beta] [FeatureGate:SELinuxMount] [Alpha] [Disabled:Alpha] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Alpha": {}, + "Feature:Beta": {}, + "Feature:SELinux": {}, + "FeatureGate:SELinuxMount": {}, + 
"FeatureGate:SELinuxMountReadWriteOncePod": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Mock selinux on mount metrics SELinuxMount metrics [LinuxOnly] [Feature:SELinux] [Serial] warning is not bumped on two Pods with the same context on RWO volume [FeatureGate:SELinuxMountReadWriteOncePod] [Beta] [Feature:SELinuxMountReadWriteOncePodOnly] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Beta": {}, + "Feature:SELinux": {}, + "Feature:SELinuxMountReadWriteOncePodOnly": {}, + "FeatureGate:SELinuxMountReadWriteOncePod": {}, + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Mock selinux on mount metrics SELinuxMount metrics [LinuxOnly] [Feature:SELinux] [Serial] warning is bumped on two Pods with a different context on RWO volume [FeatureGate:SELinuxMountReadWriteOncePod] [Beta] [Feature:SELinuxMountReadWriteOncePodOnly] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Beta": {}, + "Feature:SELinux": {}, + "Feature:SELinuxMountReadWriteOncePodOnly": {}, + "FeatureGate:SELinuxMountReadWriteOncePod": {}, + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Mock selinux on mount metrics SELinuxMount metrics [LinuxOnly] [Feature:SELinux] [Serial] error is not bumped on two Pods with the same context on RWO volume and SELinuxMount enabled [FeatureGate:SELinuxMountReadWriteOncePod] [Beta] [FeatureGate:SELinuxMount] [Alpha] [Disabled:Alpha] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Alpha": {}, + "Feature:Beta": {}, + "Feature:SELinux": {}, + "FeatureGate:SELinuxMount": {}, + "FeatureGate:SELinuxMountReadWriteOncePod": {}, + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Mock selinux on mount metrics SELinuxMount metrics [LinuxOnly] [Feature:SELinux] [Serial] error is bumped on two Pods with a different context on RWO volume and SELinuxMount enabled [FeatureGate:SELinuxMountReadWriteOncePod] [Beta] [FeatureGate:SELinuxMount] [Alpha] [Disabled:Alpha] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Alpha": {}, + "Feature:Beta": {}, + "Feature:SELinux": {}, + "FeatureGate:SELinuxMount": {}, + "FeatureGate:SELinuxMountReadWriteOncePod": {}, + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Mock selinux on mount metrics SELinuxMount metrics [LinuxOnly] [Feature:SELinux] [Serial] error is bumped on two Pods 
with a different context on RWX volume and SELinuxMount enabled [FeatureGate:SELinuxMountReadWriteOncePod] [Beta] [FeatureGate:SELinuxMount] [Alpha] [Disabled:Alpha] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Alpha": {}, + "Feature:Beta": {}, + "Feature:SELinux": {}, + "FeatureGate:SELinuxMount": {}, + "FeatureGate:SELinuxMountReadWriteOncePod": {}, + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Mock selinux on mount metrics SELinuxMount metrics [LinuxOnly] [Feature:SELinux] [Serial] error is bumped on two Pods with a different context on RWOP volume [FeatureGate:SELinuxMountReadWriteOncePod] [Beta] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Beta": {}, + "Feature:SELinux": {}, + "FeatureGate:SELinuxMountReadWriteOncePod": {}, + "Serial": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Mock volume service account token CSIServiceAccountToken token should not be plumbed down when csiServiceAccountTokenEnabled=false [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Mock volume service account token CSIServiceAccountToken token should not be plumbed down when CSIDriver is not deployed [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Mock volume service account token CSIServiceAccountToken token should be plumbed down when csiServiceAccountTokenEnabled=true [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Mock volume snapshot CSI Volume Snapshots [Feature:VolumeSnapshotDataSource] volumesnapshotcontent and pvc in Bound state with deletion timestamp set should not get deleted while snapshot finalizer exists [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Mock volume snapshot CSI Volume Snapshots secrets [Feature:VolumeSnapshotDataSource] volume 
snapshot create/delete with secrets [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Mock volume snapshot CSI Snapshot Controller metrics [Feature:VolumeSnapshotDataSource] snapshot controller should emit dynamic CreateSnapshot, CreateSnapshotAndReady, and DeleteSnapshot metrics [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Mock volume snapshot CSI Snapshot Controller metrics [Feature:VolumeSnapshotDataSource] snapshot controller should emit pre-provisioned CreateSnapshot, CreateSnapshotAndReady, and DeleteSnapshot metrics [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:VolumeSnapshotDataSource": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Mock volume storage capacity storage capacity unlimited [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Mock volume storage capacity storage capacity exhausted, immediate binding [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Mock volume storage capacity storage capacity exhausted, late binding, no topology [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Mock volume storage capacity storage capacity exhausted, late binding, with topology [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Mock volume storage capacity CSIStorageCapacity 
CSIStorageCapacity unused [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Mock volume storage capacity CSIStorageCapacity CSIStorageCapacity disabled [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Mock volume storage capacity CSIStorageCapacity CSIStorageCapacity used, no capacity [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Mock volume storage capacity CSIStorageCapacity CSIStorageCapacity used, insufficient capacity [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Mock volume storage capacity CSIStorageCapacity CSIStorageCapacity used, have capacity [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Mock volume expansion CSI Volume expansion should expand volume without restarting pod if nodeExpansion=off [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Mock volume expansion CSI Volume expansion should expand volume by restarting pod if attach=on, nodeExpansion=on [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Mock volume expansion CSI Volume expansion should not have staging_path missing in node expand volume pod if attach=on, nodeExpansion=on [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": 
"openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Mock volume expansion CSI Volume expansion should expand volume by restarting pod if attach=off, nodeExpansion=on [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Mock volume expansion CSI Volume expansion should not expand volume if resizingOnDriver=off, resizingOnSC=on [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Mock volume expansion CSI online volume expansion with secret should expand volume without restarting pod if attach=on, nodeExpansion=on, csiNodeExpandSecret=on [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Mock volume expansion CSI online volume expansion should expand volume without restarting pod if attach=on, nodeExpansion=on [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Mock volume expansion CSI online volume expansion should expand volume without restarting pod if attach=off, nodeExpansion=on [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Mock volume expansion Expansion with recovery [Feature:RecoverVolumeExpansionFailure] should record target size in allocated resources [Disabled:Alpha] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:RecoverVolumeExpansionFailure": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Mock volume expansion Expansion with recovery [Feature:RecoverVolumeExpansionFailure] should allow recovery if controller expansion fails with final error [Disabled:Alpha] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:RecoverVolumeExpansionFailure": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", 
+ "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Mock volume expansion Expansion with recovery [Feature:RecoverVolumeExpansionFailure] recovery should not be possible in partially expanded volumes [Disabled:Alpha] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:RecoverVolumeExpansionFailure": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Mock volume limit CSI volume limit information using mock driver should report attach limit when limit is bigger than 0 [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Mock volume limit CSI volume limit information using mock driver should report attach limit for generic ephemeral volume when persistent volume is attached [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Mock volume limit CSI volume limit information using mock driver should report attach limit for persistent volume when generic ephemeral volume is attached [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Mock workload info CSI workload information using mock driver should be passed when podInfoOnMount=true [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Mock workload info CSI workload information using mock driver contain ephemeral=true when using inline volume [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Mock workload info CSI workload information using mock driver should not be passed when podInfoOnMount=false [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Mock workload info CSI workload information using mock driver should 
not be passed when CSIDriver does not exist [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Mock workload info CSI PodInfoOnMount Update should not be passed when update from true to false [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-storage] CSI Mock workload info CSI PodInfoOnMount Update should be passed when update from false to true [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-storage": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-apps] stateful Upgrade [Feature:StatefulUpgrade] stateful upgrade should maintain a functioning cluster [Disabled:Unimplemented] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:StatefulUpgrade": {}, + "sig-apps": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-auth] ServiceAccount admission controller migration [Feature:BoundServiceAccountTokenVolume] master upgrade should maintain a functioning cluster [Disabled:Unimplemented] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:BoundServiceAccountTokenVolume": {}, + "sig-auth": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] kube-proxy migration [Feature:KubeProxyDaemonSetMigration] Upgrade kube-proxy from static pods to a DaemonSet should maintain a functioning cluster [Feature:KubeProxyDaemonSetUpgrade] [Disabled:Unimplemented] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:KubeProxyDaemonSetMigration": {}, + "Feature:KubeProxyDaemonSetUpgrade": {}, + "sig-network": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-network] kube-proxy migration [Feature:KubeProxyDaemonSetMigration] Downgrade kube-proxy from a DaemonSet to static pods should maintain a functioning cluster [Feature:KubeProxyDaemonSetDowngrade] [Disabled:Unimplemented] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:KubeProxyDaemonSetDowngrade": {}, + "Feature:KubeProxyDaemonSetMigration": {}, + "sig-network": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": 
"[sig-cloud-provider-gcp] Upgrade [Feature:Upgrade] master upgrade should maintain a functioning cluster [Feature:MasterUpgrade] [Disabled:SpecialConfig] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:MasterUpgrade": {}, + "Feature:Upgrade": {}, + "sig-cloud-provider-gcp": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cloud-provider-gcp] Upgrade [Feature:Upgrade] cluster upgrade should maintain a functioning cluster [Feature:ClusterUpgrade] [Disabled:SpecialConfig] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:ClusterUpgrade": {}, + "Feature:Upgrade": {}, + "sig-cloud-provider-gcp": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cloud-provider-gcp] Downgrade [Feature:Downgrade] cluster downgrade should maintain a functioning cluster [Feature:ClusterDowngrade] [Disabled:SpecialConfig] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:ClusterDowngrade": {}, + "Feature:Downgrade": {}, + "sig-cloud-provider-gcp": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cloud-provider-gcp] GKE node pools [Feature:GKENodePool] should create a cluster with multiple node pools [Feature:GKENodePool] [Disabled:SpecialConfig] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:GKENodePool": {}, + "sig-cloud-provider-gcp": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cloud-provider-gcp] HA-master [Feature:HAMaster] survive addition/removal replicas same zone [Serial] [Disruptive] [Disabled:SpecialConfig] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Feature:HAMaster": {}, + "Serial": {}, + "sig-cloud-provider-gcp": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cloud-provider-gcp] HA-master [Feature:HAMaster] survive addition/removal replicas different zones [Serial] [Disruptive] [Disabled:SpecialConfig] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Feature:HAMaster": {}, + "Serial": {}, + "sig-cloud-provider-gcp": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cloud-provider-gcp] HA-master [Feature:HAMaster] survive addition/removal replicas multizone workers [Serial] [Disruptive] [Disabled:SpecialConfig] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Feature:HAMaster": {}, + "Serial": {}, + "sig-cloud-provider-gcp": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + 
"memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cloud-provider-gcp] Ports Security Check [Feature:KubeletSecurity] should not be able to proxy to the readonly kubelet port 10255 using proxy subresource [Disabled:SpecialConfig] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:KubeletSecurity": {}, + "sig-cloud-provider-gcp": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cloud-provider-gcp] Ports Security Check [Feature:KubeletSecurity] should not be able to proxy to cadvisor port 4194 using proxy subresource [Disabled:SpecialConfig] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:KubeletSecurity": {}, + "sig-cloud-provider-gcp": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cloud-provider-gcp] Ports Security Check [Feature:KubeletSecurity] should not have port 10255 open on its all public IP addresses [Disabled:SpecialConfig] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:KubeletSecurity": {}, + "sig-cloud-provider-gcp": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cloud-provider-gcp] Ports Security Check [Feature:KubeletSecurity] should not have port 4194 open on its all public IP addresses [Disabled:SpecialConfig] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:KubeletSecurity": {}, + "sig-cloud-provider-gcp": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cloud-provider-gcp] [Disruptive] NodeLease NodeLease deletion node lease should be deleted when corresponding node is deleted [Disabled:SpecialConfig] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "sig-cloud-provider-gcp": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cloud-provider-gcp] Reboot [Disruptive] [Feature:Reboot] each node by ordering clean reboot and ensure they function upon restart [Disabled:SpecialConfig] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Feature:Reboot": {}, + "sig-cloud-provider-gcp": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cloud-provider-gcp] Reboot [Disruptive] [Feature:Reboot] each node by ordering unclean reboot and ensure they function upon restart [Disabled:SpecialConfig] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Feature:Reboot": {}, + "sig-cloud-provider-gcp": {} + }, 
+ "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cloud-provider-gcp] Reboot [Disruptive] [Feature:Reboot] each node by triggering kernel panic and ensure they function upon restart [Disabled:SpecialConfig] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Feature:Reboot": {}, + "sig-cloud-provider-gcp": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cloud-provider-gcp] Reboot [Disruptive] [Feature:Reboot] each node by switching off the network interface and ensure they function upon switch on [Disabled:SpecialConfig] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Feature:Reboot": {}, + "sig-cloud-provider-gcp": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cloud-provider-gcp] Reboot [Disruptive] [Feature:Reboot] each node by dropping all inbound packets for a while and ensure they function afterwards [Disabled:SpecialConfig] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Feature:Reboot": {}, + "sig-cloud-provider-gcp": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cloud-provider-gcp] Reboot [Disruptive] [Feature:Reboot] each node by dropping all outbound packets for a while and ensure they function afterwards [Disabled:SpecialConfig] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Feature:Reboot": {}, + "sig-cloud-provider-gcp": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cloud-provider-gcp] Nodes [Disruptive] Resize [Slow] should be able to delete nodes [Disabled:SpecialConfig] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-cloud-provider-gcp": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cloud-provider-gcp] Nodes [Disruptive] Resize [Slow] should be able to add nodes [Disabled:SpecialConfig] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Slow": {}, + "sig-cloud-provider-gcp": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cloud-provider-gcp] Restart [Disruptive] [KubeUp] should restart all nodes and ensure all nodes and pods recover [Disabled:SpecialConfig] [Serial] [Suite:k8s]", + "otherNames": null, + 
"labels": { + "Disruptive": {}, + "KubeUp": {}, + "sig-cloud-provider-gcp": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cloud-provider] [Feature:CloudProvider] [Disruptive] Nodes should be deleted on API server if it doesn't exist in the cloud provider [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Feature:CloudProvider": {}, + "sig-cloud-provider": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-windows] [Feature:Windows] Cpu Resources [Serial] Container limits should not be exceeded after waiting 2 minutes [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "sig-windows": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-windows] [Feature:Windows] Density [Serial] [Slow] create a batch of pods latency/resource should be within limit when create 10 pods with 0s interval [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "Slow": {}, + "sig-windows": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-windows] [Feature:Windows] DNS should support configurable pod DNS servers [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-windows": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-windows] [Feature:Windows] Eviction [Serial] [Slow] [Disruptive] should evict a pod when a node experiences memory pressure [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Feature:Windows": {}, + "Serial": {}, + "Slow": {}, + "sig-windows": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-windows] [Feature:Windows] GMSA Full [Serial] [Slow] GMSA support works end to end [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "Slow": {}, + "sig-windows": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-windows] [Feature:Windows] GMSA Full [Serial] [Slow] GMSA support can read and write file to remote SMB folder [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "Slow": {}, + "sig-windows": {} + }, + "tags": null, + "resources": { + 
"isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-windows] [Feature:Windows] GMSA Kubelet [Slow] kubelet GMSA support when creating a pod with correct GMSA credential specs passes the credential specs down to the Pod's containers [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Slow": {}, + "sig-windows": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-windows] [Feature:WindowsHostProcessContainers] [MinimumKubeletVersion:1.22] HostProcess containers should run as a process on the host/node [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:WindowsHostProcessContainers": {}, + "sig-windows": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-windows] [Feature:WindowsHostProcessContainers] [MinimumKubeletVersion:1.22] HostProcess containers should support init containers [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:WindowsHostProcessContainers": {}, + "sig-windows": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-windows] [Feature:WindowsHostProcessContainers] [MinimumKubeletVersion:1.22] HostProcess containers container command path validation [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:WindowsHostProcessContainers": {}, + "sig-windows": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-windows] [Feature:WindowsHostProcessContainers] [MinimumKubeletVersion:1.22] HostProcess containers should support various volume mount types [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:WindowsHostProcessContainers": {}, + "sig-windows": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-windows] [Feature:WindowsHostProcessContainers] [MinimumKubeletVersion:1.22] HostProcess containers metrics should report count of started and failed to start HostProcess containers [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:WindowsHostProcessContainers": {}, + "sig-windows": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-windows] [Feature:WindowsHostProcessContainers] [MinimumKubeletVersion:1.22] HostProcess containers 
container stats validation [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:WindowsHostProcessContainers": {}, + "sig-windows": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-windows] [Feature:WindowsHostProcessContainers] [MinimumKubeletVersion:1.22] HostProcess containers should support querying api-server using in-cluster config [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:WindowsHostProcessContainers": {}, + "sig-windows": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-windows] [Feature:WindowsHostProcessContainers] [MinimumKubeletVersion:1.22] HostProcess containers should run as localgroup accounts [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:WindowsHostProcessContainers": {}, + "sig-windows": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-windows] [Feature:GPUDevicePlugin] Device Plugin should be able to create a functioning device plugin for Windows [Disabled:SpecialConfig] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:GPUDevicePlugin": {}, + "sig-windows": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-windows] Hybrid cluster network for all supported CNIs should have stable networking for Linux and Windows pods [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-windows": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-windows] Hybrid cluster network for all supported CNIs should provide Internet connection for Linux containers [Feature:Networking-IPv4] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Networking-IPv4": {}, + "sig-windows": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-windows] Hybrid cluster network for all supported CNIs should provide Internet connection and DNS for Windows containers [Feature:Networking-IPv4] [Feature:Networking-DNS] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Networking-DNS": {}, + "Feature:Networking-IPv4": {}, + "sig-windows": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + 
"name": "[sig-windows] [Feature:WindowsHyperVContainers] HyperV containers should start a hyperv isolated container [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:WindowsHyperVContainers": {}, + "sig-windows": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-windows] [Feature:Windows] Kubelet-Stats [Serial] Kubelet stats collection for Windows nodes when running 10 pods should return within 10 seconds [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "sig-windows": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-windows] [Feature:Windows] Kubelet-Stats Kubelet stats collection for Windows nodes when windows is booted should return bootid within 10 seconds [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-windows": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-windows] [Feature:Windows] Kubelet-Stats Kubelet stats collection for Windows nodes when running 3 pods should return within 10 seconds [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-windows": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-windows] [Feature:Windows] Memory Limits [Serial] [Slow] Allocatable node memory should be equal to a calculated allocatable memory value [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "Slow": {}, + "sig-windows": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-windows] [Feature:Windows] Memory Limits [Serial] [Slow] attempt to deploy past allocatable memory limits should fail deployments of pods once there isn't enough memory [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "Serial": {}, + "Slow": {}, + "sig-windows": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-windows] [Feature:Windows] [Excluded:WindowsDocker] [MinimumKubeletVersion:1.22] RebootHost containers [Serial] [Disruptive] [Slow] should run as a reboot process on the host/node [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Feature:Windows": {}, + "Serial": {}, + "Slow": {}, + "sig-windows": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + 
"duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-windows] [Feature:Windows] SecurityContext should be able create pods and run containers with a given username [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-windows": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-windows] [Feature:Windows] SecurityContext should not be able to create pods with unknown usernames at Pod level [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-windows": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-windows] [Feature:Windows] SecurityContext should not be able to create pods with unknown usernames at Container level [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-windows": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-windows] [Feature:Windows] SecurityContext should override SecurityContext username if set [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-windows": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-windows] [Feature:Windows] SecurityContext should ignore Linux Specific SecurityContext if set [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-windows": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-windows] [Feature:Windows] SecurityContext should not be able to create pods with containers running as ContainerAdministrator when runAsNonRoot is true [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-windows": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-windows] [Feature:Windows] SecurityContext should not be able to create pods with containers running as CONTAINERADMINISTRATOR when runAsNonRoot is true [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-windows": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + 
"lifecycle": "blocking" + }, + { + "name": "[sig-windows] [Feature:Windows] SecurityContext should be able to create pod and run containers [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-windows": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-windows] Services should be able to create a functioning NodePort service for Windows [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-windows": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-windows] [Feature:Windows] Windows volume mounts check volume mount permissions container should have readOnly permissions on emptyDir [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-windows": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-windows] [Feature:Windows] Windows volume mounts check volume mount permissions container should have readOnly permissions on hostMapPath [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Windows": {}, + "sig-windows": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] AggregatedDiscovery should support raw aggregated discovery endpoint Accept headers [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] AggregatedDiscovery should support raw aggregated discovery request for CRDs [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] AggregatedDiscovery should support aggregated discovery interface [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] AggregatedDiscovery should support aggregated discovery interface for CRDs [Conformance] 
[Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] Aggregator Should be able to support the 1.17 Sample API Server using the current Aggregator [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] kube-apiserver identity [Feature:APIServerIdentity] kube-apiserver identity should persist after restart [Disruptive] [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "Feature:APIServerIdentity": {}, + "sig-api-machinery": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] ServerSideApply should create an applied object if it does not already exist [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-api-machinery": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] ServerSideApply should work for subresources [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-api-machinery": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] ServerSideApply should remove a field if it is owned but removed in the apply request [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-api-machinery": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] ServerSideApply should not remove a field if an owner unsets the field but other managers still have ownership of the field [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-api-machinery": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] ServerSideApply should ignore conflict errors if force apply is used [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-api-machinery": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", 
+ "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] ServerSideApply should work for CRDs [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-api-machinery": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] ServerSideApply should give up ownership of a field if forced applied by a controller [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-api-machinery": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] Servers with support for API chunking should return chunks of results for list calls [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] Servers with support for API chunking should support continue listing from the last key if the original version has been compacted away, though the list is inconsistent [Slow] [Conformance] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "Slow": {}, + "sig-api-machinery": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] works for CRD with validation schema [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] works for CRD without validation schema [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] works for CRD preserving unknown fields at the schema root [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": 
"blocking" + }, + { + "name": "[sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] works for CRD preserving unknown fields in an embedded object [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] works for multiple CRDs of different groups [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] works for multiple CRDs of same group but different versions [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] works for multiple CRDs of same group and version but different kinds [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] updates the published spec when one version gets renamed [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] removes definition from spec when one version gets changed to not be served [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] [Flaky] kubectl explain works for CR with the same resource name as built-in object. 
[Suite:k8s]", + "otherNames": null, + "labels": { + "Flaky": {}, + "sig-api-machinery": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] CRDValidationRatcheting [Privileged:ClusterAdmin] [FeatureGate:CRDValidationRatcheting] [Beta] MUST NOT fail to update a resource due to JSONSchema errors on unchanged correlatable fields [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Beta": {}, + "FeatureGate:CRDValidationRatcheting": {}, + "sig-api-machinery": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] CRDValidationRatcheting [Privileged:ClusterAdmin] [FeatureGate:CRDValidationRatcheting] [Beta] MUST fail to update a resource due to JSONSchema errors on unchanged uncorrelatable fields [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Beta": {}, + "FeatureGate:CRDValidationRatcheting": {}, + "sig-api-machinery": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] CRDValidationRatcheting [Privileged:ClusterAdmin] [FeatureGate:CRDValidationRatcheting] [Beta] MUST fail to update a resource due to JSONSchema errors on changed fields [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Beta": {}, + "FeatureGate:CRDValidationRatcheting": {}, + "sig-api-machinery": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] CRDValidationRatcheting [Privileged:ClusterAdmin] [FeatureGate:CRDValidationRatcheting] [Beta] MUST NOT fail to update a resource due to CRD Validation Rule errors on unchanged correlatable fields [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Beta": {}, + "FeatureGate:CRDValidationRatcheting": {}, + "sig-api-machinery": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] CRDValidationRatcheting [Privileged:ClusterAdmin] [FeatureGate:CRDValidationRatcheting] [Beta] MUST fail to update a resource due to CRD Validation Rule errors on unchanged uncorrelatable fields [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Beta": {}, + "FeatureGate:CRDValidationRatcheting": {}, + "sig-api-machinery": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] CRDValidationRatcheting [Privileged:ClusterAdmin] 
[FeatureGate:CRDValidationRatcheting] [Beta] MUST fail to update a resource due to CRD Validation Rule errors on changed fields [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Beta": {}, + "FeatureGate:CRDValidationRatcheting": {}, + "sig-api-machinery": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] CRDValidationRatcheting [Privileged:ClusterAdmin] [FeatureGate:CRDValidationRatcheting] [Beta] MUST NOT ratchet errors raised by transition rules [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Beta": {}, + "FeatureGate:CRDValidationRatcheting": {}, + "sig-api-machinery": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] CRDValidationRatcheting [Privileged:ClusterAdmin] [FeatureGate:CRDValidationRatcheting] [Beta] MUST evaluate a CRD Validation Rule with oldSelf = nil for new values when optionalOldSelf is true [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Beta": {}, + "FeatureGate:CRDValidationRatcheting": {}, + "sig-api-machinery": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] CustomResourceValidationRules [Privileged:ClusterAdmin] MUST NOT fail validation for create of a custom resource that satisfies the x-kubernetes-validations rules [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-api-machinery": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] CustomResourceValidationRules [Privileged:ClusterAdmin] MUST fail validation for create of a custom resource that does not satisfy the x-kubernetes-validations rules [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-api-machinery": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] CustomResourceValidationRules [Privileged:ClusterAdmin] MUST fail create of a custom resource definition that contains a x-kubernetes-validations rule that refers to a property that do not exist [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-api-machinery": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] CustomResourceValidationRules [Privileged:ClusterAdmin] MUST fail create of a custom resource definition that contains an 
x-kubernetes-validations rule that contains a syntax error [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-api-machinery": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] CustomResourceValidationRules [Privileged:ClusterAdmin] MUST fail create of a custom resource definition that contains an x-kubernetes-validations rule that exceeds the estimated cost limit [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-api-machinery": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] CustomResourceValidationRules [Privileged:ClusterAdmin] MUST fail create of a custom resource that exceeds the runtime cost limit for x-kubernetes-validations rule execution [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-api-machinery": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] CustomResourceValidationRules [Privileged:ClusterAdmin] MUST fail update of a custom resource that does not satisfy a x-kubernetes-validations transition rule [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-api-machinery": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] CustomResourceDefinition Watch [Privileged:ClusterAdmin] CustomResourceDefinition Watch watch on custom resource definition objects [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin] Simple CustomResourceDefinition creating/deleting custom resource definition objects works [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin] Simple CustomResourceDefinition listing custom resource definition objects works [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + 
"conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin] Simple CustomResourceDefinition getting/updating/patching custom resource definition status sub-resource works [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin] should include custom resource definition resources in discovery documents [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin] custom resource defaulting for requests and from storage works [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] Etcd failure [Disruptive] should recover from network partition with master [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "sig-api-machinery": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] Etcd failure [Disruptive] should recover from SIGKILL [Serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Disruptive": {}, + "sig-api-machinery": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] FieldValidation should detect unknown and duplicate fields of a typed object [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] FieldValidation should detect unknown metadata fields of a typed object [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + 
"duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] FieldValidation should create/apply a valid CR for CRD with validation schema [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] FieldValidation should create/apply a CR with unknown fields for CRD with no validation schema [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] FieldValidation should create/apply an invalid CR with extra properties for CRD with validation schema [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] FieldValidation should detect unknown metadata fields in both the root and embedded object of a CR [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] FieldValidation should detect duplicates in a CR when preserving unknown fields [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] API priority and fairness should ensure that requests can be classified by adding FlowSchema and PriorityLevelConfiguration [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-api-machinery": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] API priority and fairness should ensure that requests can't be drowned out (priority) [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-api-machinery": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": 
"openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] API priority and fairness should ensure that requests can't be drowned out (fairness) [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-api-machinery": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] API priority and fairness should support FlowSchema API operations [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] API priority and fairness should support PriorityLevelConfiguration API operations [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] Garbage collector should delete pods created by rc when not orphaning [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] Garbage collector should orphan pods created by rc if delete options say so [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] Garbage collector should orphan pods created by rc if deleteOptions.OrphanDependents is nil [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-api-machinery": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] Garbage collector should delete RS created by deployment when not orphaning [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] Garbage collector should orphan RS created by deployment when 
deleteOptions.PropagationPolicy is Orphan [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] Garbage collector should keep the rc around until all its pods are deleted if the deleteOptions says so [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] Garbage collector should not delete dependents that have both valid owner and owner that's waiting for dependents to be deleted [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] Garbage collector should not be blocked by dependency circle [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] Garbage collector should support cascading deletion of custom resources [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-api-machinery": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] Garbage collector should support orphan deletion of custom resources [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-api-machinery": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] Garbage collector should delete jobs and pods created by cronjob [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-api-machinery": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] Generated clientset should create pods, set the deletionTimestamp and deletionGracePeriodSeconds of the pod [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-api-machinery": {} + }, + "tags": 
null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] Generated clientset should create v1 cronJobs, delete cronJobs, watch cronJobs [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-api-machinery": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] health handlers should contain necessary checks [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-api-machinery": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] Namespaces [Serial] should ensure that all pods are removed when a namespace is deleted [Conformance] [Suite:openshift/conformance/serial/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "Serial": {}, + "sig-api-machinery": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] Namespaces [Serial] should ensure that all services are removed when a namespace is deleted [Conformance] [Suite:openshift/conformance/serial/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "Serial": {}, + "sig-api-machinery": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] Namespaces [Serial] should delete fast enough (90 percent of 100 namespaces in 150 seconds) [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-api-machinery": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] Namespaces [Serial] should always delete fast (ALL of 100 namespaces in 150 seconds) [Feature:ComprehensiveNamespaceDraining] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:ComprehensiveNamespaceDraining": {}, + "Serial": {}, + "sig-api-machinery": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] Namespaces [Serial] should patch a Namespace [Conformance] [Suite:openshift/conformance/serial/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "Serial": {}, + "sig-api-machinery": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, 
+ "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] Namespaces [Serial] should apply changes to a namespace status [Conformance] [Suite:openshift/conformance/serial/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "Serial": {}, + "sig-api-machinery": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] Namespaces [Serial] should apply an update to a Namespace [Conformance] [Suite:openshift/conformance/serial/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "Serial": {}, + "sig-api-machinery": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] Namespaces [Serial] should apply a finalizer to a Namespace [Conformance] [Suite:openshift/conformance/serial/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "Serial": {}, + "sig-api-machinery": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] OpenAPIV3 should round trip OpenAPI V3 for all built-in group versions [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-api-machinery": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] OpenAPIV3 should publish OpenAPI V3 for CustomResourceDefinition [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-api-machinery": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] OpenAPIV3 should contain OpenAPI V3 for Aggregated APIServer [Serial] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-api-machinery": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] client-go should negotiate watch and report errors with accept \"application/json\" [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-api-machinery": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] client-go should negotiate watch and report errors with accept \"application/vnd.kubernetes.protobuf\" [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + 
"labels": { + "sig-api-machinery": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] client-go should negotiate watch and report errors with accept \"application/vnd.kubernetes.protobuf,application/json\" [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-api-machinery": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] client-go should negotiate watch and report errors with accept \"application/json,application/vnd.kubernetes.protobuf\" [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-api-machinery": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] Server request timeout should return HTTP status code 400 if the user specifies an invalid timeout in the request URL [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-api-machinery": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] Server request timeout the request should be served with a default timeout if the specified timeout in the request URL exceeds maximum allowed [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-api-machinery": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] Server request timeout default timeout should be used if the specified timeout in the request URL is 0s [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-api-machinery": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] ResourceQuota should create a ResourceQuota and ensure its status is promptly calculated. [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] ResourceQuota should create a ResourceQuota and capture the life of a service. 
[Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] ResourceQuota should create a ResourceQuota and capture the life of a secret. [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] ResourceQuota should create a ResourceQuota and capture the life of a pod. [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] ResourceQuota should create a ResourceQuota and capture the life of a configMap. [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] ResourceQuota should create a ResourceQuota and capture the life of a replication controller. [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] ResourceQuota should create a ResourceQuota and capture the life of a replica set. 
[Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] ResourceQuota should create a ResourceQuota and capture the life of a ResourceClaim [Feature:DynamicResourceAllocation] [Disabled:Alpha] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:DynamicResourceAllocation": {}, + "sig-api-machinery": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] ResourceQuota should create a ResourceQuota and capture the life of a persistent volume claim [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-api-machinery": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] ResourceQuota should create a ResourceQuota and capture the life of a persistent volume claim with a storage class [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-api-machinery": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] ResourceQuota should create a ResourceQuota and capture the life of a custom resource. [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-api-machinery": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] ResourceQuota should verify ResourceQuota with terminating scopes. [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] ResourceQuota should verify ResourceQuota with best effort scope. [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] ResourceQuota should be able to update and delete ResourceQuota. 
[Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] ResourceQuota should manage the lifecycle of a ResourceQuota [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] ResourceQuota should apply changes to a resourcequota status [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] ResourceQuota should verify ResourceQuota with best effort scope using scope-selectors. [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-api-machinery": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] ResourceQuota should verify ResourceQuota with terminating scopes through scope selectors. [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-api-machinery": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] ResourceQuota [Feature:PodPriority] should verify ResourceQuota's priority class scope (quota set to pod count: 1) against a pod with same priority class. [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:PodPriority": {}, + "sig-api-machinery": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] ResourceQuota [Feature:PodPriority] should verify ResourceQuota's priority class scope (quota set to pod count: 1) against 2 pods with same priority class. 
[Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:PodPriority": {}, + "sig-api-machinery": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] ResourceQuota [Feature:PodPriority] should verify ResourceQuota's priority class scope (quota set to pod count: 1) against 2 pods with different priority class. [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:PodPriority": {}, + "sig-api-machinery": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] ResourceQuota [Feature:PodPriority] should verify ResourceQuota's multiple priority class scope (quota set to pod count: 2) against 2 pods with same priority classes. [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:PodPriority": {}, + "sig-api-machinery": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] ResourceQuota [Feature:PodPriority] should verify ResourceQuota's priority class scope (quota set to pod count: 1) against a pod with different priority class (ScopeSelectorOpNotIn). [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:PodPriority": {}, + "sig-api-machinery": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] ResourceQuota [Feature:PodPriority] should verify ResourceQuota's priority class scope (quota set to pod count: 1) against a pod with different priority class (ScopeSelectorOpExists). [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:PodPriority": {}, + "sig-api-machinery": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] ResourceQuota [Feature:PodPriority] should verify ResourceQuota's priority class scope (cpu, memory quota set) against a pod with same priority class. [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:PodPriority": {}, + "sig-api-machinery": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] ResourceQuota should verify ResourceQuota with cross namespace pod affinity scope using scope-selectors. 
[Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-api-machinery": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] server version should find the server version [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] StorageVersion resources [Feature:StorageVersionAPI] storage version with non-existing id should be GC'ed [Disabled:Alpha] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:StorageVersionAPI": {}, + "sig-api-machinery": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] Servers with support for Table transformation should return pod details [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-api-machinery": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] Servers with support for Table transformation should return chunks of table results for list calls [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-api-machinery": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] Servers with support for Table transformation should return generic metadata details across all namespaces for nodes [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-api-machinery": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] Servers with support for Table transformation should return a 406 for a backend which does not implement metadata [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] ValidatingAdmissionPolicy [Privileged:ClusterAdmin] should validate against a Deployment [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "tags": null, + "resources": { + 
"isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] ValidatingAdmissionPolicy [Privileged:ClusterAdmin] should type check validation expressions [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-api-machinery": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] ValidatingAdmissionPolicy [Privileged:ClusterAdmin] should allow expressions to refer variables. [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] ValidatingAdmissionPolicy [Privileged:ClusterAdmin] should type check a CRD [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-api-machinery": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] ValidatingAdmissionPolicy [Privileged:ClusterAdmin] should support ValidatingAdmissionPolicy API operations [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] ValidatingAdmissionPolicy [Privileged:ClusterAdmin] should support ValidatingAdmissionPolicyBinding API operations [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] Watchers should observe add, update, and delete watch notifications on configmaps [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] Watchers should be able to start watching from a specific resource version [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + 
"timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] Watchers should be able to restart watching from the last resource version observed by the previous watch [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] Watchers should observe an object deletion if it stops meeting the requirements of the selector [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] Watchers should receive events on concurrent watches in same order [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] API Streaming (aka. WatchList) [Serial] [Feature:WatchList] should be requested by informers when WatchListClient is enabled [Disabled:Alpha] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:WatchList": {}, + "Serial": {}, + "sig-api-machinery": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] API Streaming (aka. WatchList) [Serial] [Feature:WatchList] should be requested by client-go's List method when WatchListClient is enabled [Disabled:Alpha] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:WatchList": {}, + "Serial": {}, + "sig-api-machinery": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] API Streaming (aka. 
WatchList) [Serial] [Feature:WatchList] should be requested by dynamic client's List method when WatchListClient is enabled [Disabled:Alpha] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:WatchList": {}, + "Serial": {}, + "sig-api-machinery": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] CustomResourceConversionWebhook [Privileged:ClusterAdmin] should be able to convert from CR v1 to CR v2 [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] CustomResourceConversionWebhook [Privileged:ClusterAdmin] should be able to convert a non homogeneous list of CRs [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] CustomResourceFieldSelectors [Privileged:ClusterAdmin] [FeatureGate:CustomResourceFieldSelectors] [Beta] CustomResourceFieldSelectors MUST list and watch custom resources matching the field selector [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "Feature:Beta": {}, + "FeatureGate:CustomResourceFieldSelectors": {}, + "sig-api-machinery": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] Discovery should accurately determine present and missing resources [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-api-machinery": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] Discovery Custom resource should have storage version hash [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-api-machinery": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] Discovery should validate PreferredVersion for each APIGroup [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] 
Discovery should locate the groupVersion and a resource within each APIGroup [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] should include webhook resources in discovery documents [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] should be able to deny pod and configmap creation [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] should be able to deny attaching pod [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] should be able to deny custom resource creation, update and deletion [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] should unconditionally reject operations on fail closed webhook [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] should mutate configmap [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] 
AdmissionWebhook [Privileged:ClusterAdmin] should mutate pod and apply defaults after mutation [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] should not be able to mutate or prevent deletion of webhook configuration objects [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] should mutate custom resource [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] should deny crd creation [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] should mutate custom resource with different stored version [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] should mutate custom resource with pruning [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] should honor timeout [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] AdmissionWebhook 
[Privileged:ClusterAdmin] patching/updating a validating webhook should work [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] patching/updating a mutating webhook should work [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] listing validating webhooks should work [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] listing mutating webhooks should work [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] should be able to create and update validating webhook configurations with match conditions [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] should be able to create and update mutating webhook configurations with match conditions [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] should reject validating webhook configurations with invalid match conditions [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": 
"openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] should reject mutating webhook configurations with invalid match conditions [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] should mutate everything except 'skip-me' configmaps [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "sig-api-machinery": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cli] Kubectl exec should be able to execute 1000 times in a container [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-cli": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cli] Kubectl delete interactive based on user confirmation input [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-cli": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cli] Kubectl logs logs should be able to retrieve and filter logs [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "sig-cli": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cli] Kubectl logs default container logs the second container is the default-container by annotation should log default container if not specified [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-cli": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cli] Kubectl logs all pod logs the Deployment has 2 replicas and each pod has 2 containers should get logs from all pods based on default container [Disabled:RebaseInProgress] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-cli": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cli] Kubectl logs all pod logs the Deployment has 2 replicas and each pod has 2 containers should get logs from each pod and each container in Deployment 
[Disabled:RebaseInProgress] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-cli": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cli] Kubectl client Update Demo should create and stop a replication controller [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "sig-cli": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cli] Kubectl client Update Demo should scale a replication controller [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "sig-cli": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cli] Kubectl client Guestbook application should create and stop a working application [Conformance] [Slow] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "sig-cli": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cli] Kubectl client Simple pod should support exec [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-cli": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cli] Kubectl client Simple pod should support exec using resource/name [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-cli": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cli] Kubectl client Simple pod should support exec through an HTTP proxy [Skipped:Proxy] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-cli": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cli] Kubectl client Simple pod should support exec through kubectl proxy [Skipped:Proxy] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-cli": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cli] Kubectl client Simple pod should return command exit codes execing into a container with a successful command [Suite:openshift/conformance/parallel] 
[Suite:k8s]", + "otherNames": null, + "labels": { + "sig-cli": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cli] Kubectl client Simple pod should return command exit codes execing into a container with a failing command [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-cli": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cli] Kubectl client Simple pod should return command exit codes should support port-forward [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-cli": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cli] Kubectl client Simple pod should return command exit codes should handle in-cluster config [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-cli": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cli] Kubectl client Simple pod Kubectl run running a successful command [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-cli": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cli] Kubectl client Simple pod Kubectl run running a failing command [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-cli": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cli] Kubectl client Simple pod Kubectl run [Slow] running a failing command without --restart=Never [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-cli": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cli] Kubectl client Simple pod Kubectl run [Slow] running a failing command without --restart=Never, but with --rm [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-cli": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cli] Kubectl client Simple pod Kubectl run [Slow] running a failing command with --leave-stdin-open [Suite:k8s]", + "otherNames": null, + "labels": { + "Slow": {}, + "sig-cli": {} + }, + "tags": null, + "resources": { + "isolation": 
{ + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cli] Kubectl client Simple pod should support inline execution and attach [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-cli": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cli] Kubectl client Simple pod should support inline execution and attach with websockets or fallback to spdy [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-cli": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cli] Kubectl client Simple pod should contain last line of the log [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-cli": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cli] Kubectl client Kubectl api-versions should check if v1 is in available api versions [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "sig-cli": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cli] Kubectl client Kubectl get componentstatuses should get componentstatuses [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-cli": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cli] Kubectl client Kubectl prune with applyset should apply and prune objects [Disabled:RebaseInProgress] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-cli": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cli] Kubectl client Kubectl apply should apply a new configuration to an existing RC [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-cli": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cli] Kubectl client Kubectl apply should reuse port when apply to an existing SVC [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-cli": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + 
"timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cli] Kubectl client Kubectl apply apply set/view last-applied [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-cli": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cli] Kubectl client Kubectl diff should check if kubectl diff finds a difference for Deployments [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "sig-cli": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cli] Kubectl client Kubectl server-side dry-run should check if kubectl can dry-run update Pods [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "sig-cli": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cli] Kubectl client Kubectl validation should create/apply a CR with unknown fields for CRD with no validation schema [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-cli": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cli] Kubectl client Kubectl validation should create/apply a valid CR for CRD with validation schema [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-cli": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cli] Kubectl client Kubectl validation should create/apply an invalid/valid CR with arbitrary-extra properties for CRD with partially-specified validation schema [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-cli": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cli] Kubectl client Kubectl validation should detect unknown metadata fields in both the root and embedded object of a CR [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-cli": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cli] Kubectl client Kubectl validation should detect unknown metadata fields of a typed object [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": 
null, + "labels": { + "sig-cli": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cli] Kubectl client Kubectl cluster-info should check if Kubernetes control plane services is included in cluster-info [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "sig-cli": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cli] Kubectl client Kubectl cluster-info dump should check if cluster-info dump succeeds [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-cli": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cli] Kubectl client Kubectl describe should check if kubectl describe prints relevant information for rc and pods [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "sig-cli": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cli] Kubectl client Kubectl describe should check if kubectl describe prints relevant information for cronjob [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-cli": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cli] Kubectl client Kubectl expose should create services for rc [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "sig-cli": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cli] Kubectl client Kubectl label should update the label on a resource [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "sig-cli": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cli] Kubectl client Kubectl copy should copy a file from a running Pod [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-cli": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cli] Kubectl client Kubectl patch 
should add annotations for pods in rc [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "sig-cli": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cli] Kubectl client Kubectl version should check is all data is printed [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "sig-cli": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cli] Kubectl client Kubectl run pod should create a pod from an image when restart is Never [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "sig-cli": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cli] Kubectl client Kubectl replace should update a single-container pod's image [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "sig-cli": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cli] Kubectl client Proxy server should support proxy with --port 0 [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "sig-cli": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cli] Kubectl client Proxy server should support --unix-socket=/path [Conformance] [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "otherNames": null, + "labels": { + "Conformance": {}, + "sig-cli": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cli] Kubectl client Kubectl taint [Serial] should update the taint on a node [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-cli": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cli] Kubectl client Kubectl taint [Serial] should remove all the taints with the same key off a node [Skipped:SingleReplicaTopology] [Suite:openshift/conformance/serial] [Suite:k8s]", + "otherNames": null, + "labels": { + "Serial": {}, + "sig-cli": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": 
"", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cli] Kubectl client Kubectl events should show event when pod is created [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-cli": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cli] Kubectl client Kubectl create quota should create a quota without scopes [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-cli": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cli] Kubectl client Kubectl create quota should create a quota with scopes [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-cli": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cli] Kubectl client Kubectl create quota should reject quota with invalid scopes [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-cli": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cli] Kubectl client kubectl wait should ignore not found error with --for=delete [Disabled:Broken] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-cli": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cli] Kubectl client kubectl subresource flag should not be used in a bulk GET [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-cli": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cli] Kubectl client kubectl subresource flag GET on status subresource of built-in type (node) returns identical info as GET on the built-in type [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-cli": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cli] Kubectl Port forwarding With a server listening on 0.0.0.0 that expects a client request should support a client that connects, sends NO DATA, and disconnects [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-cli": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + 
"timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cli] Kubectl Port forwarding With a server listening on 0.0.0.0 that expects a client request should support a client that connects, sends DATA, and disconnects [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-cli": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cli] Kubectl Port forwarding With a server listening on 0.0.0.0 that expects NO client request should support a client that connects, sends DATA, and disconnects [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-cli": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cli] Kubectl Port forwarding With a server listening on 0.0.0.0 should support forwarding over websockets [Skipped:Proxy] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-cli": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cli] Kubectl Port forwarding With a server listening on localhost that expects a client request should support a client that connects, sends NO DATA, and disconnects [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-cli": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cli] Kubectl Port forwarding With a server listening on localhost that expects a client request should support a client that connects, sends DATA, and disconnects [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-cli": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cli] Kubectl Port forwarding With a server listening on localhost that expects NO client request should support a client that connects, sends DATA, and disconnects [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-cli": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + }, + { + "name": "[sig-cli] Kubectl Port forwarding With a server listening on localhost should support forwarding over websockets [Skipped:Proxy] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-cli": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + 
"lifecycle": "blocking" + }, + { + "name": "[sig-cli] Kubectl rollout undo undo should rollback and update deployment env [Suite:openshift/conformance/parallel] [Suite:k8s]", + "otherNames": null, + "labels": { + "sig-cli": {} + }, + "tags": null, + "resources": { + "isolation": { + "mode": "", + "conflict": null + }, + "memory": "", + "duration": "", + "timeout": "" + }, + "source": "openshift:payload:hyperkube", + "lifecycle": "blocking" + } +] \ No newline at end of file diff --git a/go.mod b/go.mod index 5c55fb1a6771b..2b952b01f99d3 100644 --- a/go.mod +++ b/go.mod @@ -44,7 +44,7 @@ require ( github.com/moby/ipvs v1.1.0 github.com/mrunalp/fileutils v0.5.1 github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 - github.com/onsi/ginkgo/v2 v2.17.2 + github.com/onsi/ginkgo/v2 v2.6.1-0.20241002180654-3ded579fec72 github.com/onsi/gomega v1.33.1 github.com/opencontainers/runc v1.1.13 github.com/opencontainers/selinux v1.11.0 @@ -191,6 +191,7 @@ require ( github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect github.com/opencontainers/go-digest v1.0.0 // indirect github.com/opencontainers/runtime-spec v1.0.3-0.20220909204839-494a5a6aca78 // indirect + github.com/openshift-eng/openshift-tests-extension v0.0.0-20241008125406-e4e57f0bc1e8 github.com/peterbourgon/diskv v2.0.1+incompatible // indirect github.com/pquerna/cachecontrol v0.1.0 // indirect github.com/prometheus/procfs v0.15.1 // indirect @@ -234,7 +235,7 @@ require ( replace ( github.com/google/cadvisor => github.com/openshift/google-cadvisor v0.49.0-openshift-4.17-2 - github.com/onsi/ginkgo/v2 => github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20240806135314-3946b2b7b2a8 + github.com/onsi/ginkgo/v2 => github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20241002180654-3ded579fec72 k8s.io/api => ./staging/src/k8s.io/api k8s.io/apiextensions-apiserver => ./staging/src/k8s.io/apiextensions-apiserver k8s.io/apimachinery => ./staging/src/k8s.io/apimachinery diff --git a/hack/lib/golang.sh b/hack/lib/golang.sh index fd111fea3cdae..356642b3ed923 100755 --- a/hack/lib/golang.sh +++ b/hack/lib/golang.sh @@ -80,6 +80,7 @@ kube::golang::server_targets() { cluster/gce/gci/mounter cmd/watch-termination openshift-hack/cmd/k8s-tests + openshift-hack/cmd/k8s-tests-ext ) echo "${targets[@]}" } diff --git a/openshift-hack/cmd/k8s-tests-ext/k8s-tests.go b/openshift-hack/cmd/k8s-tests-ext/k8s-tests.go new file mode 100644 index 0000000000000..35f345c2fac7c --- /dev/null +++ b/openshift-hack/cmd/k8s-tests-ext/k8s-tests.go @@ -0,0 +1,105 @@ +package main + +import ( + "os" + + "github.com/spf13/cobra" + "github.com/spf13/pflag" + + "github.com/openshift-eng/openshift-tests-extension/pkg/cmd" + e "github.com/openshift-eng/openshift-tests-extension/pkg/extension" + "github.com/openshift-eng/openshift-tests-extension/pkg/extension/extensiontests" + g "github.com/openshift-eng/openshift-tests-extension/pkg/ginkgo" + v "github.com/openshift-eng/openshift-tests-extension/pkg/version" + + "k8s.io/client-go/pkg/version" + utilflag "k8s.io/component-base/cli/flag" + "k8s.io/component-base/logs" + "k8s.io/kubernetes/openshift-hack/e2e/annotate/generated" + + // initialize framework extensions + _ "k8s.io/kubernetes/test/e2e/framework/debug/init" + _ "k8s.io/kubernetes/test/e2e/framework/metrics/init" +) + +func main() { + logs.InitLogs() + defer logs.FlushLogs() + pflag.CommandLine.SetNormalizeFunc(utilflag.WordSepNormalizeFunc) + + // Get 
version info from kube + kubeVersion := version.Get() + v.GitTreeState = kubeVersion.GitTreeState + v.BuildDate = kubeVersion.BuildDate + v.CommitFromGit = kubeVersion.GitCommit + + // Create our registry of openshift-tests extensions + extensionRegistry := e.NewRegistry() + kubeTestsExtension := e.NewExtension("openshift", "payload", "hyperkube") + extensionRegistry.Register(kubeTestsExtension) + + // Carve up the kube tests into our openshift suites... + kubeTestsExtension.AddSuite(e.Suite{ + Name: "kubernetes/conformance/parallel", + Parents: []string{ + "openshift/conformance/parallel", + "openshift/conformance/parallel/minimal", + }, + Qualifiers: []string{`!labels.exists(l, l == "Serial") && labels.exists(l, l == "Conformance")`}, + }) + + kubeTestsExtension.AddSuite(e.Suite{ + Name: "kubernetes/conformance/serial", + Parents: []string{ + "openshift/conformance/serial", + "openshift/conformance/serial/minimal", + }, + Qualifiers: []string{`labels.exists(l, l == "Serial") && labels.exists(l, l == "Conformance")`}, + }) + + //FIXME(stbenjam): what other suites does k8s-test contribute to? + + // Build our specs from ginkgo + specs, err := g.BuildExtensionTestSpecsFromOpenShiftGinkgoSuite() + if err != nil { + panic(err) + } + + // Initialization for the kube ginkgo test framework needs to run before all tests execute + specs.AddBeforeAll(func() { + if err := initializeTestFramework(os.Getenv("TEST_PROVIDER")); err != nil { + panic(err) + } + }) + + // Annotations get appended to test names; these are additions to upstream + // tests for controlling skips, suite membership, etc. + // + // TODO: + // - Remove this annotation code, and migrate to Labels/Tags and + // the environmental skip code from the enhancement once it's implemented. + // - Make sure to account for test renames that occur because of removal of these + // annotations + specs.Walk(func(spec *extensiontests.ExtensionTestSpec) { + if annotations, ok := generated.Annotations[spec.Name]; ok { + spec.Name += annotations + } + }) + + kubeTestsExtension.AddSpecs(specs) + + // Set up the cobra root command and register the default extension subcommands + root := &cobra.Command{ + Long: "Kubernetes tests extension for OpenShift", + } + + root.AddCommand( + cmd.DefaultExtensionCommands(extensionRegistry)..., + ) + + if err := root.Execute(); err != nil { + os.Exit(1) + } +} diff --git a/openshift-hack/cmd/k8s-tests-ext/provider.go b/openshift-hack/cmd/k8s-tests-ext/provider.go new file mode 100644 index 0000000000000..cdc948a45c652 --- /dev/null +++ b/openshift-hack/cmd/k8s-tests-ext/provider.go @@ -0,0 +1,147 @@ +package main + +import ( + "context" + "encoding/json" + "fmt" + "os" + "path/filepath" + "strings" + + "github.com/onsi/ginkgo/v2" + "github.com/onsi/gomega" + + corev1 "k8s.io/api/core/v1" + kclientset "k8s.io/client-go/kubernetes" + "k8s.io/client-go/tools/clientcmd" + "k8s.io/kubernetes/openshift-hack/e2e" + conformancetestdata "k8s.io/kubernetes/test/conformance/testdata" + "k8s.io/kubernetes/test/e2e/framework" + "k8s.io/kubernetes/test/e2e/framework/testfiles" + "k8s.io/kubernetes/test/e2e/storage/external" + e2etestingmanifests "k8s.io/kubernetes/test/e2e/testing-manifests" + testfixtures "k8s.io/kubernetes/test/fixtures" + + // this appears to inexplicably auto-register global flags.
+ _ "k8s.io/kubernetes/test/e2e/storage/drivers" + + // these are loading important global flags that we need to get and set + _ "k8s.io/kubernetes/test/e2e" + _ "k8s.io/kubernetes/test/e2e/lifecycle" +) + +// copied directly from github.com/openshift/origin/cmd/openshift-tests/provider.go +// and github.com/openshift/origin/test/extended/util/test.go +func initializeTestFramework(provider string) error { + providerInfo := &ClusterConfiguration{} + if err := json.Unmarshal([]byte(provider), &providerInfo); err != nil { + return fmt.Errorf("provider must be a JSON object with the 'type' key at a minimum: %v", err) + } + if len(providerInfo.ProviderName) == 0 { + return fmt.Errorf("provider must be a JSON object with the 'type' key") + } + config := &ClusterConfiguration{} + if err := json.Unmarshal([]byte(provider), config); err != nil { + return fmt.Errorf("provider must decode into the ClusterConfig object: %v", err) + } + + // update testContext with loaded config + testContext := &framework.TestContext + testContext.Provider = config.ProviderName + testContext.CloudConfig = framework.CloudConfig{ + ProjectID: config.ProjectID, + Region: config.Region, + Zone: config.Zone, + Zones: config.Zones, + NumNodes: config.NumNodes, + MultiMaster: config.MultiMaster, + MultiZone: config.MultiZone, + ConfigFile: config.ConfigFile, + } + testContext.AllowedNotReadyNodes = -1 + testContext.MinStartupPods = -1 + testContext.MaxNodesToGather = 0 + testContext.KubeConfig = os.Getenv("KUBECONFIG") + + // allow the CSI tests to access test data, but only briefly + // TODO: ideally CSI would not use any of these test methods + // var err error + // exutil.WithCleanup(func() { err = initCSITests(dryRun) }) + // TODO: for now I'm only initializing CSI directly, but we probably need that + // WithCleanup here as well + if err := initCSITests(); err != nil { + return err + } + + if ad := os.Getenv("ARTIFACT_DIR"); len(strings.TrimSpace(ad)) == 0 { + os.Setenv("ARTIFACT_DIR", filepath.Join(os.TempDir(), "artifacts")) + } + + testContext.DeleteNamespace = os.Getenv("DELETE_NAMESPACE") != "false" + testContext.VerifyServiceAccount = true + testfiles.AddFileSource(e2etestingmanifests.GetE2ETestingManifestsFS()) + testfiles.AddFileSource(testfixtures.GetTestFixturesFS()) + testfiles.AddFileSource(conformancetestdata.GetConformanceTestdataFS()) + testContext.KubectlPath = "kubectl" + // context.KubeConfig = KubeConfigPath() + testContext.KubeConfig = os.Getenv("KUBECONFIG") + + // "debian" is used when not set. At least GlusterFS tests need "custom". + // (There is no option for "rhel" or "centos".) 
+ testContext.NodeOSDistro = "custom" + testContext.MasterOSDistro = "custom" + + // load and set the host variable for kubectl + clientConfig := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(&clientcmd.ClientConfigLoadingRules{ExplicitPath: testContext.KubeConfig}, &clientcmd.ConfigOverrides{}) + cfg, err := clientConfig.ClientConfig() + if err != nil { + return err + } + testContext.Host = cfg.Host + + // Ensure that Kube tests run privileged (like they do upstream) + testContext.CreateTestingNS = func(ctx context.Context, baseName string, c kclientset.Interface, labels map[string]string) (*corev1.Namespace, error) { + return e2e.CreateTestingNS(ctx, baseName, c, labels, true) + } + + gomega.RegisterFailHandler(ginkgo.Fail) + + framework.AfterReadingAllFlags(testContext) + testContext.DumpLogsOnFailure = true + + // these constants are taken from kube e2e and used by tests + testContext.IPFamily = "ipv4" + if config.HasIPv6 && !config.HasIPv4 { + testContext.IPFamily = "ipv6" + } + + testContext.ReportDir = os.Getenv("TEST_JUNIT_DIR") + + return nil +} + +const ( + manifestEnvVar = "TEST_CSI_DRIVER_FILES" +) + +// copied directly from github.com/openshift/origin/cmd/openshift-tests/csi.go +// initCSITests initializes the openshift/csi suite, i.e. defines CSI tests from TEST_CSI_DRIVER_FILES. +func initCSITests() error { + manifestList := os.Getenv(manifestEnvVar) + if manifestList != "" { + manifests := strings.Split(manifestList, ",") + for _, manifest := range manifests { + if err := external.AddDriverDefinition(manifest); err != nil { + return fmt.Errorf("failed to load manifest from %q: %s", manifest, err) + } + // Register the base dir of the manifest file as a file source. + // With this we can reference the CSI driver's storageClass + // in the manifest file (FromFile field). + testfiles.AddFileSource(testfiles.RootFileSource{ + Root: filepath.Dir(manifest), + }) + } + } + + return nil +} diff --git a/openshift-hack/cmd/k8s-tests-ext/types.go b/openshift-hack/cmd/k8s-tests-ext/types.go new file mode 100644 index 0000000000000..b43652499537d --- /dev/null +++ b/openshift-hack/cmd/k8s-tests-ext/types.go @@ -0,0 +1,47 @@ +package main + +// copied directly from github.com/openshift/origin/test/extended/util/cluster/cluster.go +type ClusterConfiguration struct { + ProviderName string `json:"type"` + + // These fields (and the "type" tag for ProviderName) were chosen to match + // upstream's e2e.CloudConfig. + ProjectID string + Region string + Zone string + NumNodes int + MultiMaster bool + MultiZone bool + Zones []string + ConfigFile string + + // Disconnected is set for test jobs without external internet connectivity + Disconnected bool + + // SingleReplicaTopology is set for disabling disruptive tests or tests + // that require high availability + SingleReplicaTopology bool + + // NetworkPlugin is the "official" plugin name + NetworkPlugin string + // NetworkPluginMode is an optional sub-identifier for the NetworkPlugin. + // (Currently it is only used for OpenShiftSDN.)
+ NetworkPluginMode string `json:",omitempty"` + + // HasIPv4 and HasIPv6 determine whether IPv4-specific, IPv6-specific, + // and dual-stack-specific tests are run + HasIPv4 bool + HasIPv6 bool + + // HasSCTP determines whether SCTP connectivity tests can be run in the cluster + HasSCTP bool + + // IsProxied determines whether we are accessing the cluster through an HTTP proxy + IsProxied bool + + // IsIBMROKS determines whether the cluster is Managed IBM Cloud (ROKS) + IsIBMROKS bool + + // HasNoOptionalCapabilities indicates the cluster has no optional capabilities enabled + HasNoOptionalCapabilities bool +} diff --git a/openshift-hack/images/hyperkube/Dockerfile.rhel b/openshift-hack/images/hyperkube/Dockerfile.rhel index 5257653bc8d07..7df4e779e3389 100644 --- a/openshift-hack/images/hyperkube/Dockerfile.rhel +++ b/openshift-hack/images/hyperkube/Dockerfile.rhel @@ -1,11 +1,12 @@ FROM registry.ci.openshift.org/ocp/builder:rhel-9-golang-1.22-openshift-4.18 AS builder WORKDIR /go/src/k8s.io/kubernetes COPY . . -RUN make WHAT='cmd/kube-apiserver cmd/kube-controller-manager cmd/kube-scheduler cmd/kubelet cmd/watch-termination openshift-hack/cmd/k8s-tests' && \ +RUN make WHAT='cmd/kube-apiserver cmd/kube-controller-manager cmd/kube-scheduler cmd/kubelet cmd/watch-termination openshift-hack/cmd/k8s-tests openshift-hack/cmd/k8s-tests-ext' && \ mkdir -p /tmp/build && \ cp openshift-hack/images/hyperkube/hyperkube openshift-hack/images/hyperkube/kubensenter /tmp/build && \ - cp /go/src/k8s.io/kubernetes/_output/local/bin/linux/$(go env GOARCH)/{kube-apiserver,kube-controller-manager,kube-scheduler,kubelet,watch-termination,k8s-tests} \ - /tmp/build + cp /go/src/k8s.io/kubernetes/_output/local/bin/linux/$(go env GOARCH)/{kube-apiserver,kube-controller-manager,kube-scheduler,kubelet,watch-termination,k8s-tests,k8s-tests-ext} \ + /tmp/build && \ + gzip /tmp/build/k8s-tests-ext FROM registry.ci.openshift.org/ocp/4.18:base-rhel9 RUN yum install -y --setopt=tsflags=nodocs --setopt=skip_missing_names_on_install=False iproute && yum clean all diff --git a/staging/src/k8s.io/api/go.mod b/staging/src/k8s.io/api/go.mod index deb5688a2e380..a7587b1346cdf 100644 --- a/staging/src/k8s.io/api/go.mod +++ b/staging/src/k8s.io/api/go.mod @@ -21,6 +21,8 @@ require ( github.com/kr/pretty v0.3.1 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect + github.com/onsi/ginkgo/v2 v2.6.1-0.20241002180654-3ded579fec72 + github.com/openshift-eng/openshift-tests-extension v0.0.0-20241008125406-e4e57f0bc1e8 github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect github.com/rogpeppe/go-internal v1.12.0 // indirect github.com/spf13/pflag v1.0.5 // indirect @@ -41,7 +43,7 @@ require ( replace ( github.com/google/cadvisor => github.com/openshift/google-cadvisor v0.49.0-openshift-4.17-2 - github.com/onsi/ginkgo/v2 => github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20240806135314-3946b2b7b2a8 + github.com/onsi/ginkgo/v2 => github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20241002180654-3ded579fec72 k8s.io/api => ../api k8s.io/apiextensions-apiserver => ../apiextensions-apiserver k8s.io/apimachinery => ../apimachinery diff --git a/staging/src/k8s.io/apiextensions-apiserver/go.mod b/staging/src/k8s.io/apiextensions-apiserver/go.mod index e4e6189685208..71b84d7a75808 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/go.mod +++
b/staging/src/k8s.io/apiextensions-apiserver/go.mod @@ -78,6 +78,8 @@ require ( github.com/modern-go/reflect2 v1.0.2 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect + github.com/onsi/ginkgo/v2 v2.6.1-0.20241002180654-3ded579fec72 + github.com/openshift-eng/openshift-tests-extension v0.0.0-20241008125406-e4e57f0bc1e8 github.com/openshift/library-go v0.0.0-20241001171606-756adf2188fc // indirect github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect @@ -131,7 +133,7 @@ require ( replace ( github.com/google/cadvisor => github.com/openshift/google-cadvisor v0.49.0-openshift-4.17-2 - github.com/onsi/ginkgo/v2 => github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20240806135314-3946b2b7b2a8 + github.com/onsi/ginkgo/v2 => github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20241002180654-3ded579fec72 k8s.io/api => ../api k8s.io/apiextensions-apiserver => ../apiextensions-apiserver k8s.io/apimachinery => ../apimachinery diff --git a/staging/src/k8s.io/apimachinery/go.mod b/staging/src/k8s.io/apimachinery/go.mod index 804d103e3cc42..a95f83d9c9fc9 100644 --- a/staging/src/k8s.io/apimachinery/go.mod +++ b/staging/src/k8s.io/apimachinery/go.mod @@ -16,7 +16,7 @@ require ( github.com/google/uuid v1.6.0 github.com/moby/spdystream v0.4.0 github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f - github.com/onsi/ginkgo/v2 v2.17.2 + github.com/onsi/ginkgo/v2 v2.6.1-0.20241002180654-3ded579fec72 github.com/spf13/pflag v1.0.5 github.com/stretchr/testify v1.9.0 golang.org/x/net v0.29.0 @@ -44,6 +44,7 @@ require ( github.com/mailru/easyjson v0.7.7 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect + github.com/openshift-eng/openshift-tests-extension v0.0.0-20241008125406-e4e57f0bc1e8 github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect github.com/rogpeppe/go-internal v1.12.0 // indirect @@ -57,6 +58,6 @@ require ( ) replace ( - github.com/onsi/ginkgo/v2 => github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20240806135314-3946b2b7b2a8 + github.com/onsi/ginkgo/v2 => github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20241002180654-3ded579fec72 k8s.io/apimachinery => ../apimachinery ) diff --git a/staging/src/k8s.io/apiserver/go.mod b/staging/src/k8s.io/apiserver/go.mod index 1119155b68cfa..8bb4432729f5a 100644 --- a/staging/src/k8s.io/apiserver/go.mod +++ b/staging/src/k8s.io/apiserver/go.mod @@ -93,6 +93,8 @@ require ( github.com/moby/spdystream v0.4.0 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect + github.com/onsi/ginkgo/v2 v2.6.1-0.20241002180654-3ded579fec72 + github.com/openshift-eng/openshift-tests-extension v0.0.0-20241008125406-e4e57f0bc1e8 github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect github.com/pquerna/cachecontrol v0.1.0 // indirect @@ -128,7 +130,7 @@ require ( replace ( github.com/google/cadvisor => github.com/openshift/google-cadvisor v0.49.0-openshift-4.17-2 - github.com/onsi/ginkgo/v2 => github.com/openshift/onsi-ginkgo/v2 
diff --git a/staging/src/k8s.io/apimachinery/go.mod b/staging/src/k8s.io/apimachinery/go.mod
index 804d103e3cc42..a95f83d9c9fc9 100644
--- a/staging/src/k8s.io/apimachinery/go.mod
+++ b/staging/src/k8s.io/apimachinery/go.mod
@@ -16,7 +16,7 @@ require (
 	github.com/google/uuid v1.6.0
 	github.com/moby/spdystream v0.4.0
 	github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f
-	github.com/onsi/ginkgo/v2 v2.17.2
+	github.com/onsi/ginkgo/v2 v2.6.1-0.20241002180654-3ded579fec72
 	github.com/spf13/pflag v1.0.5
 	github.com/stretchr/testify v1.9.0
 	golang.org/x/net v0.29.0
@@ -44,6 +44,7 @@ require (
 	github.com/mailru/easyjson v0.7.7 // indirect
 	github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
 	github.com/modern-go/reflect2 v1.0.2 // indirect
+	github.com/openshift-eng/openshift-tests-extension v0.0.0-20241008125406-e4e57f0bc1e8
 	github.com/pkg/errors v0.9.1 // indirect
 	github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
 	github.com/rogpeppe/go-internal v1.12.0 // indirect
@@ -57,6 +58,6 @@ require (
 )
 
 replace (
-	github.com/onsi/ginkgo/v2 => github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20240806135314-3946b2b7b2a8
+	github.com/onsi/ginkgo/v2 => github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20241002180654-3ded579fec72
 	k8s.io/apimachinery => ../apimachinery
 )
diff --git a/staging/src/k8s.io/apiserver/go.mod b/staging/src/k8s.io/apiserver/go.mod
index 1119155b68cfa..8bb4432729f5a 100644
--- a/staging/src/k8s.io/apiserver/go.mod
+++ b/staging/src/k8s.io/apiserver/go.mod
@@ -93,6 +93,8 @@ require (
 	github.com/moby/spdystream v0.4.0 // indirect
 	github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
 	github.com/modern-go/reflect2 v1.0.2 // indirect
+	github.com/onsi/ginkgo/v2 v2.6.1-0.20241002180654-3ded579fec72
+	github.com/openshift-eng/openshift-tests-extension v0.0.0-20241008125406-e4e57f0bc1e8
 	github.com/pkg/errors v0.9.1 // indirect
 	github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
 	github.com/pquerna/cachecontrol v0.1.0 // indirect
@@ -128,7 +130,7 @@ require (
 
 replace (
 	github.com/google/cadvisor => github.com/openshift/google-cadvisor v0.49.0-openshift-4.17-2
-	github.com/onsi/ginkgo/v2 => github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20240806135314-3946b2b7b2a8
+	github.com/onsi/ginkgo/v2 => github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20241002180654-3ded579fec72
 	k8s.io/api => ../api
 	k8s.io/apiextensions-apiserver => ../apiextensions-apiserver
 	k8s.io/apimachinery => ../apimachinery
diff --git a/staging/src/k8s.io/cli-runtime/go.mod b/staging/src/k8s.io/cli-runtime/go.mod
index 92c0eabd3ea67..ed5da53c25349 100644
--- a/staging/src/k8s.io/cli-runtime/go.mod
+++ b/staging/src/k8s.io/cli-runtime/go.mod
@@ -54,7 +54,9 @@ require (
 	github.com/modern-go/reflect2 v1.0.2 // indirect
 	github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 // indirect
 	github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
+	github.com/onsi/ginkgo/v2 v2.6.1-0.20241002180654-3ded579fec72
 	github.com/onsi/gomega v1.33.1 // indirect
+	github.com/openshift-eng/openshift-tests-extension v0.0.0-20241008125406-e4e57f0bc1e8
 	github.com/peterbourgon/diskv v2.0.1+incompatible // indirect
 	github.com/pkg/errors v0.9.1 // indirect
 	github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
@@ -74,7 +76,7 @@ require (
 )
 
 replace (
-	github.com/onsi/ginkgo/v2 => github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20240806135314-3946b2b7b2a8
+	github.com/onsi/ginkgo/v2 => github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20241002180654-3ded579fec72
 	k8s.io/api => ../api
 	k8s.io/apimachinery => ../apimachinery
 	k8s.io/cli-runtime => ../cli-runtime
diff --git a/staging/src/k8s.io/client-go/go.mod b/staging/src/k8s.io/client-go/go.mod
index 6a5f0f322729d..fbb839bf50833 100644
--- a/staging/src/k8s.io/client-go/go.mod
+++ b/staging/src/k8s.io/client-go/go.mod
@@ -54,7 +54,8 @@ require (
 	github.com/modern-go/reflect2 v1.0.2 // indirect
 	github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
 	github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect
-	github.com/onsi/ginkgo/v2 v2.17.2 // indirect
+	github.com/onsi/ginkgo/v2 v2.6.1-0.20241002180654-3ded579fec72 // indirect
+	github.com/openshift-eng/openshift-tests-extension v0.0.0-20241008125406-e4e57f0bc1e8
 	github.com/pkg/errors v0.9.1 // indirect
 	github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
 	github.com/x448/float16 v0.8.4 // indirect
@@ -67,7 +68,7 @@ require (
 )
 
 replace (
-	github.com/onsi/ginkgo/v2 => github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20240806135314-3946b2b7b2a8
+	github.com/onsi/ginkgo/v2 => github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20241002180654-3ded579fec72
 	k8s.io/api => ../api
 	k8s.io/apimachinery => ../apimachinery
 	k8s.io/client-go => ../client-go
diff --git a/staging/src/k8s.io/cloud-provider/go.mod b/staging/src/k8s.io/cloud-provider/go.mod
index d17e0dab5f463..0d8eefcf0b676 100644
--- a/staging/src/k8s.io/cloud-provider/go.mod
+++ b/staging/src/k8s.io/cloud-provider/go.mod
@@ -60,6 +60,8 @@ require (
 	github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
 	github.com/modern-go/reflect2 v1.0.2 // indirect
 	github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
+	github.com/onsi/ginkgo/v2 v2.6.1-0.20241002180654-3ded579fec72
+	github.com/openshift-eng/openshift-tests-extension v0.0.0-20241008125406-e4e57f0bc1e8
 	github.com/openshift/library-go v0.0.0-20241001171606-756adf2188fc // indirect
 	github.com/pkg/errors v0.9.1 // indirect
 	github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
@@ -111,7 +113,7 @@ require (
 )
 
 replace (
-	github.com/onsi/ginkgo/v2 => github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20240806135314-3946b2b7b2a8
+	github.com/onsi/ginkgo/v2 => github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20241002180654-3ded579fec72
 	k8s.io/api => ../api
 	k8s.io/apiextensions-apiserver => ../apiextensions-apiserver
 	k8s.io/apimachinery => ../apimachinery
diff --git a/staging/src/k8s.io/cluster-bootstrap/go.mod b/staging/src/k8s.io/cluster-bootstrap/go.mod
index 834cf93daa5b7..99d17c15ac196 100644
--- a/staging/src/k8s.io/cluster-bootstrap/go.mod
+++ b/staging/src/k8s.io/cluster-bootstrap/go.mod
@@ -22,6 +22,8 @@ require (
 	github.com/kr/text v0.2.0 // indirect
 	github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
 	github.com/modern-go/reflect2 v1.0.2 // indirect
+	github.com/onsi/ginkgo/v2 v2.6.1-0.20241002180654-3ded579fec72
+	github.com/openshift-eng/openshift-tests-extension v0.0.0-20241008125406-e4e57f0bc1e8
 	github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
 	github.com/x448/float16 v0.8.4 // indirect
 	golang.org/x/crypto v0.27.0 // indirect
@@ -36,7 +38,7 @@ require (
 )
 
 replace (
-	github.com/onsi/ginkgo/v2 => github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20240806135314-3946b2b7b2a8
+	github.com/onsi/ginkgo/v2 => github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20241002180654-3ded579fec72
 	k8s.io/api => ../api
 	k8s.io/apimachinery => ../apimachinery
 	k8s.io/cluster-bootstrap => ../cluster-bootstrap
diff --git a/staging/src/k8s.io/code-generator/examples/go.mod b/staging/src/k8s.io/code-generator/examples/go.mod
index 59229b11bd31f..c69c52d22860b 100644
--- a/staging/src/k8s.io/code-generator/examples/go.mod
+++ b/staging/src/k8s.io/code-generator/examples/go.mod
@@ -59,4 +59,4 @@ replace (
 
 replace github.com/google/cadvisor => github.com/openshift/google-cadvisor v0.49.0-openshift-4.17-2
 
-replace github.com/onsi/ginkgo/v2 => github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20240806135314-3946b2b7b2a8
+replace github.com/onsi/ginkgo/v2 => github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20241002180654-3ded579fec72
diff --git a/staging/src/k8s.io/code-generator/go.mod b/staging/src/k8s.io/code-generator/go.mod
index 0f6e283d3ec92..2b82237d331e5 100644
--- a/staging/src/k8s.io/code-generator/go.mod
+++ b/staging/src/k8s.io/code-generator/go.mod
@@ -32,7 +32,9 @@ require (
 	github.com/mailru/easyjson v0.7.7 // indirect
 	github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
 	github.com/modern-go/reflect2 v1.0.2 // indirect
+	github.com/onsi/ginkgo/v2 v2.6.1-0.20241002180654-3ded579fec72
 	github.com/onsi/gomega v1.33.1 // indirect
+	github.com/openshift-eng/openshift-tests-extension v0.0.0-20241008125406-e4e57f0bc1e8
 	github.com/x448/float16 v0.8.4 // indirect
 	golang.org/x/mod v0.17.0 // indirect
 	golang.org/x/sync v0.8.0 // indirect
@@ -44,7 +46,7 @@ require (
 )
 
 replace (
-	github.com/onsi/ginkgo/v2 => github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20240806135314-3946b2b7b2a8
+	github.com/onsi/ginkgo/v2 => github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20241002180654-3ded579fec72
 	k8s.io/apimachinery => ../apimachinery
 	k8s.io/code-generator => ../code-generator
 )
diff --git a/staging/src/k8s.io/component-base/go.mod b/staging/src/k8s.io/component-base/go.mod
index dfacc96f8c6bd..09f65bea40dbc 100644
--- a/staging/src/k8s.io/component-base/go.mod
+++ b/staging/src/k8s.io/component-base/go.mod
@@ -58,6 +58,8 @@ require (
 	github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
 	github.com/modern-go/reflect2 v1.0.2 // indirect
 	github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
+	github.com/onsi/ginkgo/v2 v2.6.1-0.20241002180654-3ded579fec72
+	github.com/openshift-eng/openshift-tests-extension v0.0.0-20241008125406-e4e57f0bc1e8
 	github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
 	github.com/x448/float16 v0.8.4 // indirect
 	go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0 // indirect
@@ -83,7 +85,7 @@ require (
 
 replace (
 	github.com/google/cadvisor => github.com/openshift/google-cadvisor v0.49.0-openshift-4.17-2
-	github.com/onsi/ginkgo/v2 => github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20240806135314-3946b2b7b2a8
+	github.com/onsi/ginkgo/v2 => github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20241002180654-3ded579fec72
 	k8s.io/api => ../api
 	k8s.io/apiextensions-apiserver => ../apiextensions-apiserver
 	k8s.io/apimachinery => ../apimachinery
diff --git a/staging/src/k8s.io/component-helpers/go.mod b/staging/src/k8s.io/component-helpers/go.mod
index 5f25205945c3a..11beb39fa1c6b 100644
--- a/staging/src/k8s.io/component-helpers/go.mod
+++ b/staging/src/k8s.io/component-helpers/go.mod
@@ -33,6 +33,8 @@ require (
 	github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
 	github.com/modern-go/reflect2 v1.0.2 // indirect
 	github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
+	github.com/onsi/ginkgo/v2 v2.6.1-0.20241002180654-3ded579fec72
+	github.com/openshift-eng/openshift-tests-extension v0.0.0-20241008125406-e4e57f0bc1e8
 	github.com/pkg/errors v0.9.1 // indirect
 	github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
 	github.com/x448/float16 v0.8.4 // indirect
@@ -55,7 +57,7 @@ require (
 
 replace (
 	github.com/google/cadvisor => github.com/openshift/google-cadvisor v0.49.0-openshift-4.17-2
-	github.com/onsi/ginkgo/v2 => github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20240806135314-3946b2b7b2a8
+	github.com/onsi/ginkgo/v2 => github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20241002180654-3ded579fec72
 	k8s.io/api => ../api
 	k8s.io/apiextensions-apiserver => ../apiextensions-apiserver
 	k8s.io/apimachinery => ../apimachinery
diff --git a/staging/src/k8s.io/controller-manager/go.mod b/staging/src/k8s.io/controller-manager/go.mod
index 8f5d182927cca..a8ce6cf9fcc58 100644
--- a/staging/src/k8s.io/controller-manager/go.mod
+++ b/staging/src/k8s.io/controller-manager/go.mod
@@ -55,7 +55,9 @@ require (
 	github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
 	github.com/modern-go/reflect2 v1.0.2 // indirect
 	github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
+	github.com/onsi/ginkgo/v2 v2.6.1-0.20241002180654-3ded579fec72
 	github.com/onsi/gomega v1.33.1 // indirect
+	github.com/openshift-eng/openshift-tests-extension v0.0.0-20241008125406-e4e57f0bc1e8
 	github.com/pkg/errors v0.9.1 // indirect
 	github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
 	github.com/prometheus/client_golang v1.19.1 // indirect
@@ -104,7 +106,7 @@ require (
 )
 
 replace (
-	github.com/onsi/ginkgo/v2 => github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20240806135314-3946b2b7b2a8
+	github.com/onsi/ginkgo/v2 => github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20241002180654-3ded579fec72
 	k8s.io/api => ../api
 	k8s.io/apimachinery => ../apimachinery
 	k8s.io/apiserver => ../apiserver
diff --git a/staging/src/k8s.io/cri-api/go.mod b/staging/src/k8s.io/cri-api/go.mod
index 945fc5d5221f4..721c6ad4f4d96 100644
--- a/staging/src/k8s.io/cri-api/go.mod
+++ b/staging/src/k8s.io/cri-api/go.mod
@@ -13,6 +13,8 @@ require (
 	github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
 	github.com/kr/pretty v0.3.1 // indirect
+	github.com/onsi/ginkgo/v2 v2.6.1-0.20241002180654-3ded579fec72
+	github.com/openshift-eng/openshift-tests-extension v0.0.0-20241008125406-e4e57f0bc1e8
 	github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
 	github.com/rogpeppe/go-internal v1.12.0 // indirect
 	golang.org/x/net v0.29.0 // indirect
@@ -25,3 +27,5 @@ require (
 )
 
 replace k8s.io/cri-api => ../cri-api
+
+replace github.com/onsi/ginkgo/v2 => github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20241002180654-3ded579fec72
diff --git a/staging/src/k8s.io/cri-client/go.mod b/staging/src/k8s.io/cri-client/go.mod
index 51bf0d73f225f..c8163b0fc3373 100644
--- a/staging/src/k8s.io/cri-client/go.mod
+++ b/staging/src/k8s.io/cri-client/go.mod
@@ -48,6 +48,8 @@ require (
 	github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
 	github.com/modern-go/reflect2 v1.0.2 // indirect
 	github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
+	github.com/onsi/ginkgo/v2 v2.6.1-0.20241002180654-3ded579fec72
+	github.com/openshift-eng/openshift-tests-extension v0.0.0-20241008125406-e4e57f0bc1e8
 	github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
 	github.com/prometheus/client_golang v1.19.1 // indirect
 	github.com/prometheus/client_model v0.6.1 // indirect
@@ -80,7 +82,7 @@ require (
 )
 
 replace (
-	github.com/onsi/ginkgo/v2 => github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20240806135314-3946b2b7b2a8
+	github.com/onsi/ginkgo/v2 => github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20241002180654-3ded579fec72
 	k8s.io/api => ../api
 	k8s.io/apimachinery => ../apimachinery
 	k8s.io/client-go => ../client-go
diff --git a/staging/src/k8s.io/csi-translation-lib/go.mod b/staging/src/k8s.io/csi-translation-lib/go.mod
index 7dbb2bb5e4a73..e9748b2e94437 100644
--- a/staging/src/k8s.io/csi-translation-lib/go.mod
+++ b/staging/src/k8s.io/csi-translation-lib/go.mod
@@ -22,6 +22,8 @@ require (
 	github.com/kr/text v0.2.0 // indirect
 	github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
 	github.com/modern-go/reflect2 v1.0.2 // indirect
+	github.com/onsi/ginkgo/v2 v2.6.1-0.20241002180654-3ded579fec72
+	github.com/openshift-eng/openshift-tests-extension v0.0.0-20241008125406-e4e57f0bc1e8
 	github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
 	github.com/x448/float16 v0.8.4 // indirect
 	golang.org/x/net v0.29.0 // indirect
@@ -35,7 +37,7 @@ require (
 )
 
 replace (
-	github.com/onsi/ginkgo/v2 => github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20240806135314-3946b2b7b2a8
+	github.com/onsi/ginkgo/v2 => github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20241002180654-3ded579fec72
 	k8s.io/api => ../api
 	k8s.io/apimachinery => ../apimachinery
 	k8s.io/csi-translation-lib => ../csi-translation-lib
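Note the two spellings of the same change in this patch: modules that already had a replace block (cri-client and csi-translation-lib above) get the ginkgo line bumped in place, while modules that previously had no ginkgo replace at all (cri-api above; kms and mount-utils below) get a standalone replace directive appended at the end of the file. Go modules treats the two forms identically; a sketch of the equivalence:

// Single-target form, appended at end of file:
replace github.com/onsi/ginkgo/v2 => github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20241002180654-3ded579fec72

// Equivalent block form, as used by the larger staging modules:
replace (
	github.com/onsi/ginkgo/v2 => github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20241002180654-3ded579fec72
)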
diff --git a/staging/src/k8s.io/dynamic-resource-allocation/go.mod b/staging/src/k8s.io/dynamic-resource-allocation/go.mod
index c91443e7b729f..e7ff05e6a1bed 100644
--- a/staging/src/k8s.io/dynamic-resource-allocation/go.mod
+++ b/staging/src/k8s.io/dynamic-resource-allocation/go.mod
@@ -46,6 +46,8 @@ require (
 	github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
 	github.com/modern-go/reflect2 v1.0.2 // indirect
 	github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
+	github.com/onsi/ginkgo/v2 v2.6.1-0.20241002180654-3ded579fec72
+	github.com/openshift-eng/openshift-tests-extension v0.0.0-20241008125406-e4e57f0bc1e8
 	github.com/pkg/errors v0.9.1 // indirect
 	github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
 	github.com/prometheus/client_golang v1.19.1 // indirect
@@ -79,7 +81,7 @@ require (
 )
 
 replace (
-	github.com/onsi/ginkgo/v2 => github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20240806135314-3946b2b7b2a8
+	github.com/onsi/ginkgo/v2 => github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20241002180654-3ded579fec72
 	k8s.io/api => ../api
 	k8s.io/apimachinery => ../apimachinery
 	k8s.io/apiserver => ../apiserver
diff --git a/staging/src/k8s.io/endpointslice/go.mod b/staging/src/k8s.io/endpointslice/go.mod
index ca7726cfaeb6c..475869bfd3e64 100644
--- a/staging/src/k8s.io/endpointslice/go.mod
+++ b/staging/src/k8s.io/endpointslice/go.mod
@@ -38,7 +38,9 @@ require (
 	github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
 	github.com/modern-go/reflect2 v1.0.2 // indirect
 	github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
+	github.com/onsi/ginkgo/v2 v2.6.1-0.20241002180654-3ded579fec72
 	github.com/onsi/gomega v1.33.1 // indirect
+	github.com/openshift-eng/openshift-tests-extension v0.0.0-20241008125406-e4e57f0bc1e8
 	github.com/pkg/errors v0.9.1 // indirect
 	github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
 	github.com/prometheus/client_golang v1.19.1 // indirect
@@ -65,7 +67,7 @@ require (
 )
 
 replace (
-	github.com/onsi/ginkgo/v2 => github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20240806135314-3946b2b7b2a8
+	github.com/onsi/ginkgo/v2 => github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20241002180654-3ded579fec72
 	k8s.io/api => ../api
 	k8s.io/apimachinery => ../apimachinery
 	k8s.io/client-go => ../client-go
diff --git a/staging/src/k8s.io/kms/go.mod b/staging/src/k8s.io/kms/go.mod
index 9e491fe066f47..b074433dcffea 100644
--- a/staging/src/k8s.io/kms/go.mod
+++ b/staging/src/k8s.io/kms/go.mod
@@ -10,6 +10,8 @@ require (
 )
 
 require (
+	github.com/onsi/ginkgo/v2 v2.6.1-0.20241002180654-3ded579fec72
+	github.com/openshift-eng/openshift-tests-extension v0.0.0-20241008125406-e4e57f0bc1e8
 	golang.org/x/net v0.29.0 // indirect
 	golang.org/x/sys v0.25.0 // indirect
 	golang.org/x/text v0.18.0 // indirect
@@ -18,3 +20,5 @@ require (
 )
 
 replace k8s.io/kms => ../kms
+
+replace github.com/onsi/ginkgo/v2 => github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20241002180654-3ded579fec72
diff --git a/staging/src/k8s.io/kms/internal/plugins/_mock/go.mod b/staging/src/k8s.io/kms/internal/plugins/_mock/go.mod
index 690494697dbf9..ed9b33e6fcd20 100644
--- a/staging/src/k8s.io/kms/internal/plugins/_mock/go.mod
+++ b/staging/src/k8s.io/kms/internal/plugins/_mock/go.mod
@@ -24,4 +24,4 @@ replace k8s.io/kms => ../../../../kms
 
 replace github.com/google/cadvisor => github.com/openshift/google-cadvisor v0.49.0-openshift-4.17-2
 
-replace github.com/onsi/ginkgo/v2 => github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20240806135314-3946b2b7b2a8
+replace github.com/onsi/ginkgo/v2 => github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20241002180654-3ded579fec72
diff --git a/staging/src/k8s.io/kube-aggregator/go.mod b/staging/src/k8s.io/kube-aggregator/go.mod
index 0a940c877bd69..3493e54f1babe 100644
--- a/staging/src/k8s.io/kube-aggregator/go.mod
+++ b/staging/src/k8s.io/kube-aggregator/go.mod
@@ -65,6 +65,8 @@ require (
 	github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
 	github.com/modern-go/reflect2 v1.0.2 // indirect
 	github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect
+	github.com/onsi/ginkgo/v2 v2.6.1-0.20241002180654-3ded579fec72
+	github.com/openshift-eng/openshift-tests-extension v0.0.0-20241008125406-e4e57f0bc1e8
 	github.com/openshift/library-go v0.0.0-20241001171606-756adf2188fc // indirect
 	github.com/pkg/errors v0.9.1 // indirect
 	github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
@@ -114,7 +116,7 @@ require (
 
 replace (
 	github.com/google/cadvisor => github.com/openshift/google-cadvisor v0.49.0-openshift-4.17-2
-	github.com/onsi/ginkgo/v2 => github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20240806135314-3946b2b7b2a8
+	github.com/onsi/ginkgo/v2 => github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20241002180654-3ded579fec72
 	k8s.io/api => ../api
 	k8s.io/apiextensions-apiserver => ../apiextensions-apiserver
 	k8s.io/apimachinery => ../apimachinery
diff --git a/staging/src/k8s.io/kube-controller-manager/go.mod b/staging/src/k8s.io/kube-controller-manager/go.mod
index 83ccc330606e4..c38918da44c0a 100644
--- a/staging/src/k8s.io/kube-controller-manager/go.mod
+++ b/staging/src/k8s.io/kube-controller-manager/go.mod
@@ -19,6 +19,8 @@ require (
 	github.com/kr/text v0.2.0 // indirect
 	github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
 	github.com/modern-go/reflect2 v1.0.2 // indirect
+	github.com/onsi/ginkgo/v2 v2.6.1-0.20241002180654-3ded579fec72
+	github.com/openshift-eng/openshift-tests-extension v0.0.0-20241008125406-e4e57f0bc1e8
 	github.com/x448/float16 v0.8.4 // indirect
 	golang.org/x/net v0.29.0 // indirect
 	golang.org/x/text v0.18.0 // indirect
@@ -33,7 +35,7 @@ require (
 )
 
 replace (
-	github.com/onsi/ginkgo/v2 => github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20240806135314-3946b2b7b2a8
+	github.com/onsi/ginkgo/v2 => github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20241002180654-3ded579fec72
 	k8s.io/api => ../api
 	k8s.io/apimachinery => ../apimachinery
 	k8s.io/apiserver => ../apiserver
diff --git a/staging/src/k8s.io/kube-proxy/go.mod b/staging/src/k8s.io/kube-proxy/go.mod
index e0a0feccc3ade..c4305178eecb8 100644
--- a/staging/src/k8s.io/kube-proxy/go.mod
+++ b/staging/src/k8s.io/kube-proxy/go.mod
@@ -24,6 +24,8 @@ require (
 	github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
 	github.com/modern-go/reflect2 v1.0.2 // indirect
 	github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
+	github.com/onsi/ginkgo/v2 v2.6.1-0.20241002180654-3ded579fec72
+	github.com/openshift-eng/openshift-tests-extension v0.0.0-20241008125406-e4e57f0bc1e8
 	github.com/prometheus/client_golang v1.19.1 // indirect
 	github.com/prometheus/client_model v0.6.1 // indirect
 	github.com/prometheus/common v0.55.0 // indirect
@@ -45,7 +47,7 @@ require (
 )
 
 replace (
-	github.com/onsi/ginkgo/v2 => github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20240806135314-3946b2b7b2a8
+	github.com/onsi/ginkgo/v2 => github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20241002180654-3ded579fec72
 	k8s.io/api => ../api
 	k8s.io/apimachinery => ../apimachinery
 	k8s.io/client-go => ../client-go
diff --git a/staging/src/k8s.io/kube-scheduler/go.mod b/staging/src/k8s.io/kube-scheduler/go.mod
index 4d3e5d091c9ae..2fe8e8fc17e94 100644
--- a/staging/src/k8s.io/kube-scheduler/go.mod
+++ b/staging/src/k8s.io/kube-scheduler/go.mod
@@ -21,6 +21,8 @@ require (
 	github.com/kr/text v0.2.0 // indirect
 	github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
 	github.com/modern-go/reflect2 v1.0.2 // indirect
+	github.com/onsi/ginkgo/v2 v2.6.1-0.20241002180654-3ded579fec72
+	github.com/openshift-eng/openshift-tests-extension v0.0.0-20241008125406-e4e57f0bc1e8
 	github.com/x448/float16 v0.8.4 // indirect
 	golang.org/x/net v0.29.0 // indirect
 	golang.org/x/text v0.18.0 // indirect
@@ -33,7 +35,7 @@ require (
 )
 
 replace (
-	github.com/onsi/ginkgo/v2 => github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20240806135314-3946b2b7b2a8
+	github.com/onsi/ginkgo/v2 => github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20241002180654-3ded579fec72
 	k8s.io/api => ../api
 	k8s.io/apimachinery => ../apimachinery
 	k8s.io/client-go => ../client-go
diff --git a/staging/src/k8s.io/kubectl/go.mod b/staging/src/k8s.io/kubectl/go.mod
index c787e16ea5693..da0585b84b006 100644
--- a/staging/src/k8s.io/kubectl/go.mod
+++ b/staging/src/k8s.io/kubectl/go.mod
@@ -19,7 +19,7 @@ require (
 	github.com/lithammer/dedent v1.1.0
 	github.com/mitchellh/go-wordwrap v1.0.1
 	github.com/moby/term v0.5.0
-	github.com/onsi/ginkgo/v2 v2.17.2
+	github.com/onsi/ginkgo/v2 v2.6.1-0.20241002180654-3ded579fec72
 	github.com/onsi/gomega v1.33.1
 	github.com/pkg/errors v0.9.1
 	github.com/russross/blackfriday/v2 v2.1.0
@@ -78,6 +78,7 @@ require (
 	github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
 	github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect
 	github.com/opencontainers/go-digest v1.0.0 // indirect
+	github.com/openshift-eng/openshift-tests-extension v0.0.0-20241008125406-e4e57f0bc1e8
 	github.com/peterbourgon/diskv v2.0.1+incompatible // indirect
 	github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
 	github.com/x448/float16 v0.8.4 // indirect
@@ -97,7 +98,7 @@ require (
 )
 
 replace (
-	github.com/onsi/ginkgo/v2 => github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20240806135314-3946b2b7b2a8
+	github.com/onsi/ginkgo/v2 => github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20241002180654-3ded579fec72
 	k8s.io/api => ../api
 	k8s.io/apimachinery => ../apimachinery
 	k8s.io/cli-runtime => ../cli-runtime
diff --git a/staging/src/k8s.io/kubelet/go.mod b/staging/src/k8s.io/kubelet/go.mod
index 9b394509cf08c..a4d8e89012c21 100644
--- a/staging/src/k8s.io/kubelet/go.mod
+++ b/staging/src/k8s.io/kubelet/go.mod
@@ -37,6 +37,8 @@ require (
 	github.com/modern-go/reflect2 v1.0.2 // indirect
 	github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
 	github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect
+	github.com/onsi/ginkgo/v2 v2.6.1-0.20241002180654-3ded579fec72
+	github.com/openshift-eng/openshift-tests-extension v0.0.0-20241008125406-e4e57f0bc1e8
 	github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
 	github.com/prometheus/client_golang v1.19.1 // indirect
 	github.com/prometheus/client_model v0.6.1 // indirect
@@ -62,7 +64,7 @@ require (
 )
 
 replace (
-	github.com/onsi/ginkgo/v2 => github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20240806135314-3946b2b7b2a8
+	github.com/onsi/ginkgo/v2 => github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20241002180654-3ded579fec72
 	k8s.io/api => ../api
 	k8s.io/apimachinery => ../apimachinery
 	k8s.io/apiserver => ../apiserver
diff --git a/staging/src/k8s.io/metrics/go.mod b/staging/src/k8s.io/metrics/go.mod
index 30b0e9da4d07a..0e578fafba3d4 100644
--- a/staging/src/k8s.io/metrics/go.mod
+++ b/staging/src/k8s.io/metrics/go.mod
@@ -32,6 +32,8 @@ require (
 	github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
 	github.com/modern-go/reflect2 v1.0.2 // indirect
 	github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
+	github.com/onsi/ginkgo/v2 v2.6.1-0.20241002180654-3ded579fec72
+	github.com/openshift-eng/openshift-tests-extension v0.0.0-20241008125406-e4e57f0bc1e8
 	github.com/pkg/errors v0.9.1 // indirect
 	github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
 	github.com/spf13/pflag v1.0.5 // indirect
@@ -60,7 +62,7 @@ require (
 )
 
 replace (
-	github.com/onsi/ginkgo/v2 => github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20240806135314-3946b2b7b2a8
+	github.com/onsi/ginkgo/v2 => github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20241002180654-3ded579fec72
 	k8s.io/api => ../api
 	k8s.io/apimachinery => ../apimachinery
 	k8s.io/client-go => ../client-go
diff --git a/staging/src/k8s.io/mount-utils/go.mod b/staging/src/k8s.io/mount-utils/go.mod
index 6fe6da4e8f62c..204d2c17ef18e 100644
--- a/staging/src/k8s.io/mount-utils/go.mod
+++ b/staging/src/k8s.io/mount-utils/go.mod
@@ -20,7 +20,9 @@ require (
 	github.com/go-logr/logr v1.4.2 // indirect
 	github.com/godbus/dbus/v5 v5.1.0 // indirect
 	github.com/kr/pretty v0.3.1 // indirect
+	github.com/onsi/ginkgo/v2 v2.6.1-0.20241002180654-3ded579fec72
 	github.com/opencontainers/runtime-spec v1.0.3-0.20220909204839-494a5a6aca78 // indirect
+	github.com/openshift-eng/openshift-tests-extension v0.0.0-20241008125406-e4e57f0bc1e8
 	github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
 	github.com/rogpeppe/go-internal v1.12.0 // indirect
 	github.com/sirupsen/logrus v1.9.3 // indirect
@@ -29,3 +31,5 @@ require (
 )
 
 replace k8s.io/mount-utils => ../mount-utils
+
+replace github.com/onsi/ginkgo/v2 => github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20241002180654-3ded579fec72
diff --git a/staging/src/k8s.io/pod-security-admission/go.mod b/staging/src/k8s.io/pod-security-admission/go.mod
index fbe19aef5aeb0..6c756727064a7 100644
--- a/staging/src/k8s.io/pod-security-admission/go.mod
+++ b/staging/src/k8s.io/pod-security-admission/go.mod
@@ -57,7 +57,9 @@ require (
 	github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
 	github.com/modern-go/reflect2 v1.0.2 // indirect
 	github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
+	github.com/onsi/ginkgo/v2 v2.6.1-0.20241002180654-3ded579fec72
 	github.com/onsi/gomega v1.33.1 // indirect
+	github.com/openshift-eng/openshift-tests-extension v0.0.0-20241008125406-e4e57f0bc1e8
 	github.com/openshift/library-go v0.0.0-20241001171606-756adf2188fc // indirect
 	github.com/pkg/errors v0.9.1 // indirect
 	github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
@@ -108,7 +110,7 @@ require (
 )
 
 replace (
-	github.com/onsi/ginkgo/v2 => github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20240806135314-3946b2b7b2a8
+	github.com/onsi/ginkgo/v2 => github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20241002180654-3ded579fec72
 	k8s.io/api => ../api
 	k8s.io/apiextensions-apiserver => ../apiextensions-apiserver
 	k8s.io/apimachinery => ../apimachinery
diff --git a/staging/src/k8s.io/sample-apiserver/go.mod b/staging/src/k8s.io/sample-apiserver/go.mod
index 163d0c29bedfe..4dde024007c21 100644
--- a/staging/src/k8s.io/sample-apiserver/go.mod
+++ b/staging/src/k8s.io/sample-apiserver/go.mod
@@ -55,6 +55,8 @@ require (
 	github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
 	github.com/modern-go/reflect2 v1.0.2 // indirect
 	github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
+	github.com/onsi/ginkgo/v2 v2.6.1-0.20241002180654-3ded579fec72
+	github.com/openshift-eng/openshift-tests-extension v0.0.0-20241008125406-e4e57f0bc1e8
 	github.com/openshift/library-go v0.0.0-20241001171606-756adf2188fc // indirect
 	github.com/pkg/errors v0.9.1 // indirect
 	github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
@@ -110,7 +112,7 @@ require (
 )
 
 replace (
-	github.com/onsi/ginkgo/v2 => github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20240806135314-3946b2b7b2a8
+	github.com/onsi/ginkgo/v2 => github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20241002180654-3ded579fec72
 	k8s.io/api => ../api
 	k8s.io/apiextensions-apiserver => ../apiextensions-apiserver
 	k8s.io/apimachinery => ../apimachinery
diff --git a/staging/src/k8s.io/sample-cli-plugin/go.mod b/staging/src/k8s.io/sample-cli-plugin/go.mod
index 237efd7a4a37c..06f5a59a02ac2 100644
--- a/staging/src/k8s.io/sample-cli-plugin/go.mod
+++ b/staging/src/k8s.io/sample-cli-plugin/go.mod
@@ -42,6 +42,8 @@ require (
 	github.com/modern-go/reflect2 v1.0.2 // indirect
 	github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 // indirect
 	github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
+	github.com/onsi/ginkgo/v2 v2.6.1-0.20241002180654-3ded579fec72
+	github.com/openshift-eng/openshift-tests-extension v0.0.0-20241008125406-e4e57f0bc1e8
 	github.com/peterbourgon/diskv v2.0.1+incompatible // indirect
 	github.com/pkg/errors v0.9.1 // indirect
 	github.com/stretchr/objx v0.5.2 // indirect
@@ -73,7 +75,7 @@ require (
 )
 
 replace (
-	github.com/onsi/ginkgo/v2 => github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20240806135314-3946b2b7b2a8
+	github.com/onsi/ginkgo/v2 => github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20241002180654-3ded579fec72
 	k8s.io/api => ../api
 	k8s.io/apimachinery => ../apimachinery
 	k8s.io/cli-runtime => ../cli-runtime
diff --git a/staging/src/k8s.io/sample-controller/go.mod b/staging/src/k8s.io/sample-controller/go.mod
index c5bdef493d809..4e4cb3eba9dcb 100644
--- a/staging/src/k8s.io/sample-controller/go.mod
+++ b/staging/src/k8s.io/sample-controller/go.mod
@@ -35,6 +35,8 @@ require (
 	github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
 	github.com/modern-go/reflect2 v1.0.2 // indirect
 	github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
+	github.com/onsi/ginkgo/v2 v2.6.1-0.20241002180654-3ded579fec72
+	github.com/openshift-eng/openshift-tests-extension v0.0.0-20241008125406-e4e57f0bc1e8
 	github.com/pkg/errors v0.9.1 // indirect
 	github.com/spf13/pflag v1.0.5 // indirect
 	github.com/x448/float16 v0.8.4 // indirect
@@ -60,7 +62,7 @@ require (
 )
 
 replace (
-	github.com/onsi/ginkgo/v2 => github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20240806135314-3946b2b7b2a8
+	github.com/onsi/ginkgo/v2 => github.com/openshift/onsi-ginkgo/v2 v2.6.1-0.20241002180654-3ded579fec72
 	k8s.io/api => ../api
 	k8s.io/apimachinery => ../apimachinery
 	k8s.io/client-go => ../client-go