Skip to content

Commit

Permalink
Support access points on the same file system
Browse files Browse the repository at this point in the history
Expands the supported `volumeHandle` formats to include a three-field
version: `{fsid}:{subpath}:{apid}`. This addresses the limitation
originally described in #100 whereby k8s relies solely on the
`volumeHandle` to uniquely distinguish one PV from another.

As part of this fix, specifying `accesspoint=fsap-...` in `mountOptions`
is deprecated.

For more details, see the related issue (#167).

The following scenarios were tested in a live environment:

**Conflicting access point in volumeHandle and mountOptions**

- `volumeHandle: fs::ap1`
- `mountOptions: ['tls', 'accesspoint=ap2']`
- expect: fail
- actual: fail with `Warning  FailedMount  1s (x4 over 4s)  kubelet, ip-10-0-137-122.ec2.internal  MountVolume.SetUp failed for volume "pv-aptest-1" : kubernetes.io/csi: mounter.SetupAt failed: rpc error: code = InvalidArgument desc = Found conflicting access point IDs in mountOptions (fsap-04d3307beebd04739) and volumeHandle (fsap-057e9a9b209ec9531)`
- result: ✔

**Same access point in volumeHandle and mountOptions**

- `volumeHandle: fs::ap1`
- `mountOptions: ['tls', 'accesspoint=ap1']`
- expect: success
- result: ✔

**Two access points on the same file system**

This change also makes sure we automatically populate `tls` and `accesspoint` in `mountOptions`.

- `mountOptions: []` (for both)
- PV1:
  - `volumeHandle: fs1::ap1`
- PV2:
  - `volumeHandle: fs1::ap2`
- expect: success, both mounts accessible and distinct
- result: ✔

**Subpaths with access points**

- `mountOptions: []` (for all)
- PV1:
  - `volumeHandle: fs1::ap1` (root -- should be able to see /foo and bar)
- PV2:
  - `volumeHandle: fs1:/foo/bar:ap1` (absolute path)
- PV3:
  - `volumeHandle: fs1:foo/bar:ap1` (relative path)
- expect: success
- actual: success (had to create `$absolutemountpoint/foo/bar` in the fs first, as expected)
- result: ✔

Signed-off-by: Eric Fried <[email protected]>
  • Loading branch information
2uasimojo committed Jun 18, 2020
1 parent f66f6a2 commit f99c4b3
Show file tree
Hide file tree
Showing 4 changed files with 258 additions and 29 deletions.
6 changes: 2 additions & 4 deletions examples/kubernetes/access_points/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -6,8 +6,7 @@ In this case, the separation is managed on the EFS side rather than the kubernet

### Create Access Points (in EFS)
Following [this doc](https://docs.aws.amazon.com/efs/latest/ug/create-access-point.html), create a separate access point for each independent data store you wish to expose in your cluster, tailoring the ownership and permissions as desired.

**Note**: Until [issue #167](https://github.com/kubernetes-sigs/aws-efs-csi-driver/issues/167) is resolved, you must use a separate file system for each access point if they are to be mounted in the same pod.
There is no need to use different EFS volumes.

**Note**: Although it is possible to [configure IAM policies for access points](https://docs.aws.amazon.com/efs/latest/ug/efs-access-points.html#access-points-iam-policy), by default no additional IAM permissions are necessary.

Expand Down Expand Up @@ -59,8 +58,7 @@ You can find these values using the AWS CLI:
```sh
>> aws efs describe-access-points --query 'AccessPoints[*].{"FileSystemId": FileSystemId, "AccessPointId": AccessPointId}'
```

**Note**: Until [issue #167](https://github.com/kubernetes-sigs/aws-efs-csi-driver/issues/167) is resolved, both the `FileSystemId` and the `AccessPointId` must be unique in each PersistentVolume.
If you are using the same underlying EFS volume, the `FileSystemId` will be the same in both PersistentVolume specs, but the `AccessPointId` will differ.

### Deploy the Example Application
Create PVs, persistent volume claims (PVCs), and storage class:
Expand Down
2 changes: 1 addition & 1 deletion examples/kubernetes/access_points/specs/example.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -40,7 +40,7 @@ spec:
- accesspoint=fsap-19f752f0068c22464
csi:
driver: efs.csi.aws.com
volumeHandle: fs-2e48aa59
volumeHandle: fs-e8a95a42
---
apiVersion: v1
kind: PersistentVolumeClaim
Expand Down
103 changes: 89 additions & 14 deletions pkg/driver/node.go
Original file line number Diff line number Diff line change
Expand Up @@ -48,6 +48,7 @@ func (d *Driver) NodeUnstageVolume(ctx context.Context, req *csi.NodeUnstageVolu

func (d *Driver) NodePublishVolume(ctx context.Context, req *csi.NodePublishVolumeRequest) (*csi.NodePublishVolumeResponse, error) {
klog.V(4).Infof("NodePublishVolume: called with args %+v", req)
mountOptions := []string{}

target := req.GetTargetPath()
if len(target) == 0 {
Expand Down Expand Up @@ -80,23 +81,27 @@ func (d *Driver) NodePublishVolume(ctx context.Context, req *csi.NodePublishVolu
}
}

volumeId := req.GetVolumeId()
if !isValidFileSystemId(volumeId) {
return nil, status.Error(codes.InvalidArgument, fmt.Sprintf("volume ID %s is invalid", volumeId))
fsid, vpath, apid, err := parseVolumeId(req.GetVolumeId())
if err != nil {
// parseVolumeId returns the appropriate error
return nil, err
}

var source string
tokens := strings.Split(volumeId, ":")
if len(tokens) == 1 {
// fs-xxxxxx
source = fmt.Sprintf("%s:%s", volumeId, subpath)
} else if len(tokens) == 2 {
// fs-xxxxxx:/a/b/c
cleanPath := path.Clean(tokens[1])
source = fmt.Sprintf("%s:%s", tokens[0], cleanPath)
// The `vpath` takes precedence if specified. If not specified, we'll either use the
// (deprecated) `path` from the volContext, or default to "/" from above.
if vpath != "" {
subpath = vpath
}
source := fmt.Sprintf("%s:%s", fsid, subpath)

// If an access point was specified, we need to include two things in the mountOptions:
// - The access point ID, properly prefixed. (Below, we'll check whether an access point was
// also specified in the incoming mount options and react appropriately.)
// - The TLS option. Access point mounts won't work without it. (For ease of use, we won't
// require this to be present in the mountOptions already, but we won't complain if it is.)
if apid != "" {
mountOptions = append(mountOptions, fmt.Sprintf("accesspoint=%s", apid), "tls")
}

mountOptions := []string{}
if req.GetReadonly() {
mountOptions = append(mountOptions, "ro")
}
Expand All @@ -111,6 +116,26 @@ func (d *Driver) NodePublishVolume(ctx context.Context, req *csi.NodePublishVolu
return false
}
for _, f := range m.MountFlags {
// Special-case check for access point
// Not sure if `accesspoint` is allowed to have mixed case, but this shouldn't hurt,
// and it simplifies both matches (HasPrefix, hasOption) below.
f = strings.ToLower(f)
if strings.HasPrefix(f, "accesspoint=") {
// The MountOptions Access Point ID
moapid := f[12:]
// No matter what, warn that this is not the right way to specify an access point
klog.Warning(fmt.Sprintf(
"Use of 'accesspoint' under mountOptions is deprecated with this driver. "+
"Specify the access point in the volumeHandle instead, e.g. 'volumeHandle: %s:%s:%s'",
fsid, subpath, moapid))
// If they specified the same access point in both places, let it slide; otherwise, fail.
if apid != "" && moapid != apid {
return nil, status.Error(codes.InvalidArgument, fmt.Sprintf(
"Found conflicting access point IDs in mountOptions (%s) and volumeHandle (%s)", moapid, apid))
}
// Fall through; the code below will uniq for us.
}

if !hasOption(mountOptions, f) {
mountOptions = append(mountOptions, f)
}
Expand Down Expand Up @@ -217,6 +242,56 @@ func (d *Driver) isValidVolumeCapabilities(volCaps []*csi.VolumeCapability) bool
return foundAll
}

// parseVolumeId accepts a NodePublishVolumeRequest.VolumeId as a colon-delimited string of the
// form `{fileSystemID}:{mountPath}:{accessPointID}`.
// - The `{fileSystemID}` is required, and expected to be of the form `fs-...`.
// - The other two fields are optional -- they may be empty or omitted entirely. For example,
// `fs-abcd1234::`, `fs-abcd1234:`, and `fs-abcd1234` are equivalent.
// - The `{mountPath}`, if specified, is not required to be absolute.
// - The `{accessPointID}` is expected to be of the form `fsap-...`.
// parseVolumeId returns the parsed values, of which `subpath` and `apid` may be empty; and an
// error, which will be a `status.Error` with `codes.InvalidArgument`, or `nil` if the `volumeId`
// was parsed successfully.
// See the following issues for some background:
// - https://github.com/kubernetes-sigs/aws-efs-csi-driver/issues/100
// - https://github.com/kubernetes-sigs/aws-efs-csi-driver/issues/167
// parseVolumeId accepts a NodePublishVolumeRequest.VolumeId as a colon-delimited string of the
// form `{fileSystemID}:{mountPath}:{accessPointID}`.
//   - The `{fileSystemID}` is required, and expected to be of the form `fs-...`.
//   - The other two fields are optional -- they may be empty or omitted entirely. For example,
//     `fs-abcd1234::`, `fs-abcd1234:`, and `fs-abcd1234` are equivalent.
//   - The `{mountPath}`, if specified, is not required to be absolute.
//   - The `{accessPointID}` is expected to be of the form `fsap-...`.
//
// parseVolumeId returns the parsed values, of which `subpath` and `apid` may be empty; and an
// error, which will be a `status.Error` with `codes.InvalidArgument`, or `nil` if the `volumeId`
// was parsed successfully.
// See the following issues for some background:
//   - https://github.com/kubernetes-sigs/aws-efs-csi-driver/issues/100
//   - https://github.com/kubernetes-sigs/aws-efs-csi-driver/issues/167
func parseVolumeId(volumeId string) (fsid, subpath, apid string, err error) {
	// The file system ID is required and leads the string, so it can be
	// validated before any splitting is done.
	if !isValidFileSystemId(volumeId) {
		return "", "", "", status.Error(codes.InvalidArgument, fmt.Sprintf("volume ID '%s' is invalid: Expected a file system ID of the form 'fs-...'", volumeId))
	}

	fields := strings.Split(volumeId, ":")
	if len(fields) > 3 {
		return "", "", "", status.Error(codes.InvalidArgument, fmt.Sprintf("volume ID '%s' is invalid: Expected at most three fields separated by ':'", volumeId))
	}

	// Field 1 (required): the file system ID.
	fsid = fields[0]

	// Field 2 (optional): the subpath. An empty field leaves subpath empty.
	if len(fields) > 1 && fields[1] != "" {
		subpath = path.Clean(fields[1])
	}

	// Field 3 (optional): the access point ID. An empty field leaves apid empty.
	if len(fields) == 3 && fields[2] != "" {
		apid = fields[2]
		if !isValidAccessPointId(apid) {
			return fsid, subpath, apid, status.Error(codes.InvalidArgument, fmt.Sprintf("volume ID '%s' has an invalid access point ID '%s': Expected it to be of the form 'fsap-...'", volumeId, apid))
		}
	}

	return fsid, subpath, apid, nil
}

// isValidFileSystemId reports whether filesystemId carries the EFS file
// system ID prefix ("fs-").
func isValidFileSystemId(filesystemId string) bool {
	const prefix = "fs-"
	return len(filesystemId) >= len(prefix) && filesystemId[:len(prefix)] == prefix
}

// isValidAccessPointId reports whether accesspointId carries the EFS access
// point ID prefix ("fsap-").
func isValidAccessPointId(accesspointId string) bool {
	const prefix = "fsap-"
	return len(accesspointId) >= len(prefix) && accesspointId[:len(prefix)] == prefix
}
176 changes: 166 additions & 10 deletions pkg/driver/node_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -29,11 +29,12 @@ import (
func TestNodePublishVolume(t *testing.T) {

var (
endpoint = "endpoint"
nodeID = "nodeID"
volumeId = "fs-volumeId"
targetPath = "/target/path"
stdVolCap = &csi.VolumeCapability{
endpoint = "endpoint"
nodeID = "nodeID"
volumeId = "fs-volumeId"
accessPointID = "fsap-abcd1234"
targetPath = "/target/path"
stdVolCap = &csi.VolumeCapability{
AccessType: &csi.VolumeCapability_Mount{
Mount: &csi.VolumeCapability_MountVolume{},
},
Expand All @@ -49,6 +50,7 @@ func TestNodePublishVolume(t *testing.T) {
expectMakeDir bool
mountArgs []interface{}
mountSuccess bool
// TODO: Make this `expectError string` (use "" for successes)
expectSuccess bool
}{
{
Expand All @@ -63,6 +65,30 @@ func TestNodePublishVolume(t *testing.T) {
mountSuccess: true,
expectSuccess: true,
},
{
name: "success: empty path",
req: &csi.NodePublishVolumeRequest{
VolumeId: volumeId + ":",
VolumeCapability: stdVolCap,
TargetPath: targetPath,
},
expectMakeDir: true,
mountArgs: []interface{}{volumeId + ":/", targetPath, "efs", gomock.Any()},
mountSuccess: true,
expectSuccess: true,
},
{
name: "success: empty path and access point",
req: &csi.NodePublishVolumeRequest{
VolumeId: volumeId + "::",
VolumeCapability: stdVolCap,
TargetPath: targetPath,
},
expectMakeDir: true,
mountArgs: []interface{}{volumeId + ":/", targetPath, "efs", gomock.Any()},
mountSuccess: true,
expectSuccess: true,
},
{
name: "success: normal with read only mount",
req: &csi.NodePublishVolumeRequest{
Expand Down Expand Up @@ -98,7 +124,8 @@ func TestNodePublishVolume(t *testing.T) {
expectSuccess: true,
},
{
name: "success: normal with path volume context",
// TODO: Validate deprecation warning
name: "success: normal with path in volume context",
req: &csi.NodePublishVolumeRequest{
VolumeId: volumeId,
VolumeCapability: stdVolCap,
Expand All @@ -110,16 +137,130 @@ func TestNodePublishVolume(t *testing.T) {
mountSuccess: true,
expectSuccess: true,
},
{
name: "fail: path in volume context must be absolute",
req: &csi.NodePublishVolumeRequest{
VolumeId: volumeId,
VolumeCapability: stdVolCap,
TargetPath: targetPath,
VolumeContext: map[string]string{"path": "a/b"},
},
expectMakeDir: false,
expectSuccess: false,
},
{
name: "success: normal with path in volume handle",
req: &csi.NodePublishVolumeRequest{
// This also shows that the path gets cleaned
VolumeId: volumeId + ":/a/b/",
VolumeCapability: stdVolCap,
TargetPath: targetPath,
},
expectMakeDir: true,
mountArgs: []interface{}{volumeId + ":/a/b", targetPath, "efs", gomock.Any()},
mountSuccess: false,
mountSuccess: true,
expectSuccess: true,
},
{
name: "success: normal with path in volume handle, empty access point",
req: &csi.NodePublishVolumeRequest{
// This also shows that relative paths are allowed when specified via volume handle
VolumeId: volumeId + ":a/b/:",
VolumeCapability: stdVolCap,
TargetPath: targetPath,
},
expectMakeDir: true,
mountArgs: []interface{}{volumeId + ":a/b", targetPath, "efs", gomock.Any()},
mountSuccess: true,
expectSuccess: true,
},
{
name: "success: path in volume handle takes precedence",
req: &csi.NodePublishVolumeRequest{
VolumeId: volumeId + ":/a/b/",
VolumeCapability: stdVolCap,
TargetPath: targetPath,
VolumeContext: map[string]string{"path": "/c/d"},
},
expectMakeDir: true,
mountArgs: []interface{}{volumeId + ":/a/b", targetPath, "efs", gomock.Any()},
mountSuccess: true,
expectSuccess: true,
},
{
name: "success: access point in volume handle, no path",
req: &csi.NodePublishVolumeRequest{
VolumeId: volumeId + "::" + accessPointID,
VolumeCapability: stdVolCap,
TargetPath: targetPath,
},
expectMakeDir: true,
mountArgs: []interface{}{volumeId + ":/", targetPath, "efs", []string{"accesspoint=" + accessPointID, "tls"}},
mountSuccess: true,
expectSuccess: true,
},
{
name: "success: path and access point in volume handle",
req: &csi.NodePublishVolumeRequest{
VolumeId: volumeId + ":/a/b:" + accessPointID,
VolumeCapability: stdVolCap,
TargetPath: targetPath,
},
expectMakeDir: true,
mountArgs: []interface{}{volumeId + ":/a/b", targetPath, "efs", []string{"accesspoint=" + accessPointID, "tls"}},
mountSuccess: true,
expectSuccess: true,
},
{
// TODO: Validate deprecation warning
name: "success: same access point in volume handle and mount options",
req: &csi.NodePublishVolumeRequest{
VolumeId: volumeId + "::" + accessPointID,
VolumeCapability: &csi.VolumeCapability{
AccessType: &csi.VolumeCapability_Mount{
Mount: &csi.VolumeCapability_MountVolume{
// This also shows we allow the `tls` option to exist already
MountFlags: []string{"tls", "accesspoint=" + accessPointID},
},
},
AccessMode: &csi.VolumeCapability_AccessMode{
Mode: csi.VolumeCapability_AccessMode_MULTI_NODE_MULTI_WRITER,
},
},
TargetPath: targetPath,
},
expectMakeDir: true,
mountArgs: []interface{}{volumeId + ":/", targetPath, "efs", []string{"accesspoint=" + accessPointID, "tls"}},
mountSuccess: true,
expectSuccess: true,
},
{
name: "fail: conflicting access point in volume handle and mount options",
req: &csi.NodePublishVolumeRequest{
VolumeId: volumeId + "::" + accessPointID,
VolumeCapability: &csi.VolumeCapability{
AccessType: &csi.VolumeCapability_Mount{
Mount: &csi.VolumeCapability_MountVolume{
MountFlags: []string{"tls", "accesspoint=fsap-deadbeef"},
},
},
AccessMode: &csi.VolumeCapability_AccessMode{
Mode: csi.VolumeCapability_AccessMode_MULTI_NODE_MULTI_WRITER,
},
},
TargetPath: targetPath,
},
expectMakeDir: false,
expectSuccess: false,
},
{
name: "fail: too many fields in volume handle",
req: &csi.NodePublishVolumeRequest{
VolumeId: volumeId + ":/a/b/::four!",
VolumeCapability: stdVolCap,
TargetPath: targetPath,
},
expectMakeDir: false,
expectSuccess: false,
},
{
Expand Down Expand Up @@ -212,6 +353,16 @@ func TestNodePublishVolume(t *testing.T) {
expectMakeDir: false,
expectSuccess: false,
},
{
name: "fail: invalid access point ID",
req: &csi.NodePublishVolumeRequest{
VolumeId: volumeId + "::invalid-id",
VolumeCapability: stdVolCap,
TargetPath: targetPath,
},
expectMakeDir: false,
expectSuccess: false,
},
}

for _, tc := range testCases {
Expand Down Expand Up @@ -242,12 +393,17 @@ func TestNodePublishVolume(t *testing.T) {
mockMounter.EXPECT().Mount(tc.mountArgs[0], tc.mountArgs[1], tc.mountArgs[2], tc.mountArgs[3]).Return(err)
}

_, err := driver.NodePublishVolume(ctx, tc.req)
ret, err := driver.NodePublishVolume(ctx, tc.req)
if !tc.expectSuccess && err == nil {
t.Fatalf("NodePublishVolume is not failed")
}
if tc.expectSuccess && err != nil {
t.Fatalf("NodePublishVolume is failed: %v", err)
if tc.expectSuccess {
if err != nil {
t.Fatalf("NodePublishVolume is failed: %v", err)
}
if ret == nil {
t.Fatalf("Expected non-nil return value")
}
}

mockCtrl.Finish()
Expand Down

0 comments on commit f99c4b3

Please sign in to comment.