nfs: support client access control for NFS volumes
Adds functionality and tests for issue #2283.

Signed-off-by: Tyler Sullens <tsullens@mix.wvu.edu>
tsullens committed Dec 19, 2018
commit af6382f (1 parent ed1d01b)
Showing 5 changed files with 164 additions and 80 deletions.
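The feature added here is per-client access control on NFS exports: each export's server spec gains an allowedClients list, and every entry is rendered as a CLIENT {} block in the generated Ganesha config (see createGaneshaExport in controller.go below). The manifest that follows is a minimal sketch of how such an NFSServer object might look, assuming the YAML field names mirror the Go spec fields used in the tests (allowedClients, clients, accessMode, squash); the metadata names and client addresses are placeholders, not taken from the commit.

apiVersion: nfs.rook.io/v1alpha1
kind: NFSServer
metadata:
  name: rook-nfs            # placeholder name
  namespace: rook-nfs-test  # placeholder namespace
spec:
  replicas: 1
  exports:
  - name: nfs-share
    server:
      # export-wide defaults, as in the existing manifests
      accessMode: ReadWrite
      squash: none
      # per-client overrides added by this commit (field casing assumed)
      allowedClients:
      - name: client-a
        clients:
        - 172.17.0.5
        accessMode: ReadOnly
        squash: root
      - name: client-b
        clients:
        - 172.17.0.0/16
        accessMode: ReadWrite
        squash: none
    persistentVolumeClaim:
      claimName: test-claim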
59 changes: 39 additions & 20 deletions pkg/operator/nfs/controller.go
@@ -117,16 +117,13 @@ func nfsOwnerRef(namespace, nfsServerID string) metav1.OwnerReference {
}
}

func getServerConfig(exports []nfsv1alpha1.ExportsSpec) map[string]map[string]string {
claimConfigOpt := make(map[string]map[string]string)
configOpt := make(map[string]string)
func getServerConfig(exports []nfsv1alpha1.ExportsSpec) map[string]nfsv1alpha1.ServerSpec {
claimConfigOpt := make(map[string]nfsv1alpha1.ServerSpec)

for _, export := range exports {
claimName := export.PersistentVolumeClaim.ClaimName
if claimName != "" {
configOpt["accessMode"] = export.Server.AccessMode
configOpt["squash"] = export.Server.Squash
claimConfigOpt[claimName] = configOpt
claimConfigOpt[claimName] = export.Server
}
}

@@ -182,16 +179,17 @@ func (c *Controller) createNFSService(nfsServer *nfsServer) error {
return nil
}

func createGaneshaExport(id int, path string, access string, squash string) string {
var accessType string
// validateNFSServerSpec guarantees `access` will be one of these values at this point
switch s.ToLower(access) {
case "readwrite":
accessType = "RW"
case "readonly":
accessType = "RO"
case "none":
accessType = "None"
func createGaneshaExport(id int, path string, serverConfig nfsv1alpha1.ServerSpec) string {

ganeshaClientConfigs := make([]string, 0)
for _, clientConfig := range serverConfig.AllowedClients {
ganeshaConf := `
CLIENT {
Clients = ` + s.Join(clientConfig.Clients, ", ") + `;
Access_Type = ` + accessType(clientConfig.AccessMode) + `;
Squash = ` + clientConfig.Squash + `;
}`
ganeshaClientConfigs = append(ganeshaClientConfigs, ganeshaConf)
}

idStr := fmt.Sprintf("%v", id)
@@ -203,23 +201,36 @@ EXPORT {
Protocols = 4;
Transports = TCP;
Sectype = sys;
Access_Type = ` + accessType + `;
Squash = ` + s.ToLower(squash) + `;
Access_Type = ` + accessType(serverConfig.AccessMode) + `;
Squash = ` + s.ToLower(serverConfig.Squash) + `;
FSAL {
Name = VFS;
}
}` + s.Join(ganeshaClientConfigs, "") + `
}`

return nfsGaneshaConfig
}

func accessType(val string) string {
switch s.ToLower(val) {
case "readwrite":
return "RW"
case "readonly":
return "RO"
case "none":
return "None"
default:
return ""
}
}

func createGaneshaConfig(spec *nfsv1alpha1.NFSServerSpec) string {
serverConfig := getServerConfig(spec.Exports)

exportsList := make([]string, 0)
id := 10
for claimName, claimConfig := range serverConfig {
exportsList = append(exportsList, createGaneshaExport(id, claimName, claimConfig["accessMode"], claimConfig["squash"]))
exportsList = append(exportsList, createGaneshaExport(id, claimName, claimConfig))
id++
}

@@ -447,6 +458,14 @@ func validateNFSServerSpec(spec nfsv1alpha1.NFSServerSpec) error {
if err := validateSquashMode(export.Server.Squash); err != nil {
return err
}
for _, client := range export.Server.AllowedClients {
if err := validateAccessMode(client.AccessMode); err != nil {
return err
}
if err := validateSquashMode(client.Squash); err != nil {
return err
}
}
}
return nil
}
138 changes: 92 additions & 46 deletions pkg/operator/nfs/controller_test.go
@@ -17,7 +17,6 @@ package nfs

import (
"fmt"
"strings"
"testing"

nfsv1alpha1 "github.com/rook/rook/pkg/apis/nfs.rook.io/v1alpha1"
@@ -30,63 +29,110 @@ import (
"k8s.io/client-go/kubernetes/fake"
)

func TestValidateNFSServerSpec(t *testing.T) {

// first, test that a good NFSServerSpec is good
spec := nfsv1alpha1.NFSServerSpec{
Replicas: 1,
Exports: []nfsv1alpha1.ExportsSpec{
{
Name: "test",
Server: nfsv1alpha1.ServerSpec{
AccessMode: "readwrite",
Squash: "none",
func TestOnAddComplexServer(t *testing.T) {
namespace := "rook-nfs-test"
nfsserver := &nfsv1alpha1.NFSServer{
ObjectMeta: metav1.ObjectMeta{
Name: "nfs-server-X",
Namespace: namespace,
},
Spec: nfsv1alpha1.NFSServerSpec{
Replicas: 1,
Exports: []nfsv1alpha1.ExportsSpec{
{
Name: "export-test",
Server: nfsv1alpha1.ServerSpec{
AccessMode: "ReadWrite",
Squash: "none",
AllowedClients: []nfsv1alpha1.AllowedClientsSpec{
{
Name: "client-test-1",
Clients: []string{"172.17.0.5"},
AccessMode: "ReadOnly",
Squash: "root",
},
{
Name: "client-test-2",
Clients: []string{"172.17.0.0/16", "serverX"},
AccessMode: "ReadWrite",
Squash: "none",
},
},
},
PersistentVolumeClaim: v1.PersistentVolumeClaimVolumeSource{
ClaimName: "test-claim",
},
},
},
},
}

err := validateNFSServerSpec(spec)
// initialize the controller and its dependencies
clientset := testop.New(3)
context := &clusterd.Context{Clientset: clientset}
controller := NewController(context, "rook/nfs:mockTag")

// in a background thread, simulate the pods running (fake statefulsets don't automatically do that)
go simulatePodsRunning(clientset, namespace, nfsserver.Spec.Replicas)

// call onAdd given the specified nfsserver
controller.onAdd(nfsserver)

// verify client service
clientService, err := clientset.CoreV1().Services(namespace).Get(appName, metav1.GetOptions{})
assert.Nil(t, err)
assert.NotNil(t, clientService)
assert.Equal(t, v1.ServiceTypeClusterIP, clientService.Spec.Type)

// test that AccessMode is invalid
spec = nfsv1alpha1.NFSServerSpec{
Replicas: 1,
Exports: []nfsv1alpha1.ExportsSpec{
{
Name: "test",
Server: nfsv1alpha1.ServerSpec{
AccessMode: "badValue",
Squash: "none",
},
},
},
// verify nfs-ganesha config in the configmap
configMap, err := clientset.CoreV1().ConfigMaps(namespace).Get(nfsConfigMapName, metav1.GetOptions{})
assert.Nil(t, err)
assert.NotNil(t, configMap)
nfsGaneshaConfig := `
EXPORT {
Export_Id = 10;
Path = /test-claim;
Pseudo = /test-claim;
Protocols = 4;
Transports = TCP;
Sectype = sys;
Access_Type = RW;
Squash = none;
FSAL {
Name = VFS;
}

err = validateNFSServerSpec(spec)
assert.NotNil(t, err)
assert.True(t, strings.Contains(err.Error(), "Invalid value (badValue) for accessMode"))

// test that Squash is invalid
spec = nfsv1alpha1.NFSServerSpec{
Replicas: 1,
Exports: []nfsv1alpha1.ExportsSpec{
{
Name: "test",
Server: nfsv1alpha1.ServerSpec{
AccessMode: "ReadWrite",
Squash: "badValue",
},
},
},
CLIENT {
Clients = 172.17.0.5;
Access_Type = RO;
Squash = root;
}
CLIENT {
Clients = 172.17.0.0/16, serverX;
Access_Type = RW;
Squash = none;
}
}
NFS_Core_Param
{
fsid_device = true;
}`
assert.Equal(t, nfsGaneshaConfig, configMap.Data[nfsConfigMapName])

// verify stateful set
ss, err := clientset.AppsV1beta1().StatefulSets(namespace).Get(appName, metav1.GetOptions{})
assert.Nil(t, err)
assert.NotNil(t, ss)
assert.Equal(t, int32(1), *ss.Spec.Replicas)
assert.Equal(t, 1, len(ss.Spec.Template.Spec.Containers))

err = validateNFSServerSpec(spec)
assert.NotNil(t, err)
assert.True(t, strings.Contains(err.Error(), "Invalid value (badValue) for squash"))
container := ss.Spec.Template.Spec.Containers[0]
assert.Equal(t, 2, len(container.VolumeMounts))

expectedVolumeMounts := []v1.VolumeMount{{Name: "test-claim", MountPath: "/test-claim"}, {Name: "nfs-ganesha-config", MountPath: "/nfs-ganesha/config"}}
assert.Equal(t, expectedVolumeMounts, container.VolumeMounts)
}

func TestOnAdd(t *testing.T) {
func TestOnAddSimpleServer(t *testing.T) {
namespace := "rook-nfs-test"
nfsserver := &nfsv1alpha1.NFSServer{
ObjectMeta: metav1.ObjectMeta{
22 changes: 11 additions & 11 deletions tests/framework/clients/read_write.go
@@ -34,18 +34,18 @@ func CreateReadWriteOperation(k8sh *utils.K8sHelper) *ReadWriteOperation {
}

// CreateWriteClient Function to create a nfs client in rook
func (f *ReadWriteOperation) CreateWriteClient(volName string) ([]string, error) {
func (f *ReadWriteOperation) CreateWriteClient(name string, volName string) ([]string, error) {
logger.Infof("creating the filesystem via replication controller")
writerSpec := getReplicationController(volName)
writerSpec := getReplicationController(name, volName)

if _, err := f.k8sh.ResourceOperation("create", writerSpec); err != nil {
return nil, err
}

assert.True(f.k8sh.T(), f.k8sh.CheckPodCountAndState("read-write-test", "default", 2, "Running"),
assert.True(f.k8sh.T(), f.k8sh.CheckPodCountAndState(name, "default", 2, "Running"),
"Make sure there are two read-write-test pods present in Running state")

podList, err := f.k8sh.GetPodNamesForApp("read-write-test", "default")
podList, err := f.k8sh.GetPodNamesForApp(name, "default")
if err != nil {
return nil, err
}
@@ -54,8 +54,8 @@ func (f *ReadWriteOperation) CreateWriteClient(volName string) ([]string, error)
}

// Delete Function to delete a nfs consuming pod in rook
func (f *ReadWriteOperation) Delete() (string, error) {
return f.k8sh.DeleteResource("rc", "read-write-test")
func (f *ReadWriteOperation) Delete(name string) (string, error) {
return f.k8sh.DeleteResource("rc", name)
}

// Read Function to read from nfs mount point created by rook, i.e. read data from a pod that has an nfs export mounted
@@ -68,26 +68,26 @@ func (f *ReadWriteOperation) Read(name string) (string, error) {

result, err := f.k8sh.Kubectl(args...)
if err != nil {
return "", fmt.Errorf("Unable to write data to pod -- : %s", err)
return "", fmt.Errorf("Unable to read data from pod -- : %s", err)

}
return result, nil

}

func getReplicationController(volName string) string {
func getReplicationController(name string, volName string) string {
return `apiVersion: v1
kind: ReplicationController
metadata:
name: read-write-test
name: ` + name + `
spec:
replicas: 2
selector:
app: read-write-test
app: ` + name + `
template:
metadata:
labels:
app: read-write-test
app: ` + name + `
spec:
containers:
- image: alpine
2 changes: 1 addition & 1 deletion tests/framework/installer/nfs_manifests.go
@@ -196,7 +196,7 @@ spec:
- name: nfs-share
server:
accessMode: ReadWrite
squash: "none"
squash: root
persistentVolumeClaim:
claimName: test-claim
`
23 changes: 21 additions & 2 deletions tests/integration/nfs_test.go
@@ -111,8 +111,8 @@ func (suite *NfsSuite) TestNfsServerInstallation() {
// verify nfs server storage
assert.True(suite.T(), true, suite.k8shelper.WaitUntilPVCIsBound("default", "nfs-pv-claim"))

defer suite.rwClient.Delete()
podList, err := suite.rwClient.CreateWriteClient("nfs-pv-claim")
defer suite.rwClient.Delete("read-write-test")
podList, err := suite.rwClient.CreateWriteClient("read-write-test", "nfs-pv-claim")
require.NoError(suite.T(), err)
assert.True(suite.T(), true, suite.checkReadData(podList))
}
@@ -142,3 +142,22 @@ func (suite *NfsSuite) checkReadData(podList []string) bool {

return false
}

func (suite *NfsSuite) TestNfsServerClients() {

// verify nfs server operator is running OK
assert.True(suite.T(), suite.k8shelper.CheckPodCountAndState("rook-nfs-operator", suite.systemNamespace, 1, "Running"),
"1 rook-nfs-operator must be in Running state")

// verify nfs server instances are running OK
assert.True(suite.T(), suite.k8shelper.CheckPodCountAndState("rook-nfs", suite.namespace, suite.instanceCount, "Running"),
fmt.Sprintf("%d rook-nfs pods must be in Running state", suite.instanceCount))

// verify nfs server storage
assert.True(suite.T(), true, suite.k8shelper.WaitUntilPVCIsBound("default", "nfs-pv-claim"))

defer suite.rwClient.Delete("write-client-test")
podList, err := suite.rwClient.CreateWriteClient("write-client-test", "nfs-pv-claim")
require.NoError(suite.T(), err)
assert.True(suite.T(), true, suite.checkReadData(podList))
}
