package vsphere

import (
	"errors"
	"fmt"

	"github.com/hashicorp/terraform/helper/schema"
	"github.com/vmware/govmomi/vim25/types"
)

// formatNasDatastoreIDMismatch is an error message format string that is
// given when two NAS datastore IDs mismatch.
const formatNasDatastoreIDMismatch = "datastore ID on host %q (%s) does not match the original datastore ID (%s)"
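
// resourceVSphereNasDatastore defines the vsphere_nas_datastore resource,
// which manages a NAS (NFS) datastore mounted on one or more ESXi hosts.
//
// A minimal configuration sketch (the volume spec attribute names come from
// schemaHostNasVolumeSpec, which is defined elsewhere, and are assumed here):
//
//   resource "vsphere_nas_datastore" "datastore" {
//     name            = "terraform-test"
//     host_system_ids = [data.vsphere_host.esxi.id]
//     type            = "NFS"
//     remote_hosts    = ["nfs.example.com"]
//     remote_path     = "/export/terraform-test"
//   }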
func resourceVSphereNasDatastore() *schema.Resource {
s := map[string]*schema.Schema{
"name": {
Type: schema.TypeString,
Description: "The name of the datastore.",
Required: true,
},
"host_system_ids": &schema.Schema{
Type: schema.TypeSet,
Description: "The managed object IDs of the hosts to mount the datastore on.",
Elem: &schema.Schema{Type: schema.TypeString},
MinItems: 1,
Required: true,
},
"folder": &schema.Schema{
Type: schema.TypeString,
Description: "The path to the datastore folder to put the datastore in.",
Optional: true,
StateFunc: normalizeFolderPath,
},
}
mergeSchema(s, schemaHostNasVolumeSpec())
mergeSchema(s, schemaDatastoreSummary())
// Add tags schema
s[vSphereTagAttributeKey] = tagsSchema()
return &schema.Resource{
Create: resourceVSphereNasDatastoreCreate,
Read: resourceVSphereNasDatastoreRead,
Update: resourceVSphereNasDatastoreUpdate,
Delete: resourceVSphereNasDatastoreDelete,
Importer: &schema.ResourceImporter{
State: resourceVSphereNasDatastoreImport,
},
Schema: s,
}
}
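
// resourceVSphereNasDatastoreCreate mounts the NAS volume on each configured
// host, optionally moves the new datastore into the requested folder, applies
// any configured tags, and then reads the resource back into state.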
func resourceVSphereNasDatastoreCreate(d *schema.ResourceData, meta interface{}) error {
client := meta.(*VSphereClient).vimClient
// Load up the tags client, which validates that we are connected to a
// proper vCenter endpoint before proceeding when tags are defined.
tagsClient, err := tagsClientIfDefined(d, meta)
if err != nil {
return err
}
hosts := sliceInterfacesToStrings(d.Get("host_system_ids").(*schema.Set).List())
p := &nasDatastoreMountProcessor{
client: client,
oldHSIDs: nil,
newHSIDs: hosts,
volSpec: expandHostNasVolumeSpec(d),
}
ds, err := p.processMountOperations()
if ds != nil {
	// Set the ID before checking the mount error so that a partially
	// mounted datastore is still tracked in state.
	d.SetId(ds.Reference().Value)
}
if err != nil {
return fmt.Errorf("error mounting datastore: %s", err)
}
// Move the datastore into the specified folder, if one was set.
folder := d.Get("folder").(string)
if !pathIsEmpty(folder) {
if err := moveDatastoreToFolderRelativeHostSystemID(client, ds, hosts[0], folder); err != nil {
return fmt.Errorf("error moving datastore to folder: %s", err)
}
}
// Apply any pending tags now
if tagsClient != nil {
if err := processTagDiff(tagsClient, d, ds); err != nil {
return err
}
}
// Done
return resourceVSphereNasDatastoreRead(d, meta)
}
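
// resourceVSphereNasDatastoreRead refreshes state from the live datastore:
// its summary attributes, inventory folder, NAS volume spec, the hosts it is
// mounted on, and any tags.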
func resourceVSphereNasDatastoreRead(d *schema.ResourceData, meta interface{}) error {
client := meta.(*VSphereClient).vimClient
id := d.Id()
ds, err := datastoreFromID(client, id)
if err != nil {
return fmt.Errorf("cannot find datastore: %s", err)
}
props, err := datastoreProperties(ds)
if err != nil {
return fmt.Errorf("could not get properties for datastore: %s", err)
}
if err := flattenDatastoreSummary(d, &props.Summary); err != nil {
return err
}
// Set the folder
folder, err := rootPathParticleDatastore.SplitRelativeFolder(ds.InventoryPath)
if err != nil {
return fmt.Errorf("error parsing datastore path %q: %s", ds.InventoryPath, err)
}
d.Set("folder", normalizeFolderPath(folder))
// Update NAS spec
if err := flattenHostNasVolume(d, props.Info.(*types.NasDatastoreInfo).Nas); err != nil {
return err
}
// Update mounted hosts
var mountedHosts []string
for _, mount := range props.Host {
mountedHosts = append(mountedHosts, mount.Key.Value)
}
if err := d.Set("host_system_ids", mountedHosts); err != nil {
return err
}
// Read tags if we have the ability to do so
if tagsClient, _ := meta.(*VSphereClient).TagsClient(); tagsClient != nil {
if err := readTagsForResource(tagsClient, ds, d); err != nil {
return err
}
}
return nil
}
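
// resourceVSphereNasDatastoreUpdate handles renames, folder moves, tag
// changes, and mount/unmount operations for hosts added to or removed from
// host_system_ids.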
func resourceVSphereNasDatastoreUpdate(d *schema.ResourceData, meta interface{}) error {
client := meta.(*VSphereClient).vimClient
// Load up the tags client, which validates that we are connected to a
// proper vCenter endpoint before proceeding when tags are defined.
tagsClient, err := tagsClientIfDefined(d, meta)
if err != nil {
return err
}
id := d.Id()
ds, err := datastoreFromID(client, id)
if err != nil {
return fmt.Errorf("cannot find datastore: %s", err)
}
// Rename this datastore if our name has drifted.
if d.HasChange("name") {
if err := renameObject(client, ds.Reference(), d.Get("name").(string)); err != nil {
return err
}
}
// Update folder if necessary
if d.HasChange("folder") {
folder := d.Get("folder").(string)
if err := moveDatastoreToFolder(client, ds, folder); err != nil {
return fmt.Errorf("could not move datastore to folder %q: %s", folder, err)
}
}
// Apply any pending tags now
if tagsClient != nil {
if err := processTagDiff(tagsClient, d, ds); err != nil {
return err
}
}
// Process mount/unmount operations.
o, n := d.GetChange("host_system_ids")
p := &nasDatastoreMountProcessor{
client: client,
oldHSIDs: sliceInterfacesToStrings(o.(*schema.Set).List()),
newHSIDs: sliceInterfacesToStrings(n.(*schema.Set).List()),
volSpec: expandHostNasVolumeSpec(d),
ds: ds,
}
// Unmount first
if err := p.processUnmountOperations(); err != nil {
return fmt.Errorf("error unmounting hosts: %s", err)
}
// Now mount
if _, err := p.processMountOperations(); err != nil {
return fmt.Errorf("error mounting hosts: %s", err)
}
// Should be done with the update here.
return resourceVSphereNasDatastoreRead(d, meta)
}
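
// resourceVSphereNasDatastoreDelete unmounts the datastore from every
// configured host. Unmounting the final host removes the datastore itself.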
func resourceVSphereNasDatastoreDelete(d *schema.ResourceData, meta interface{}) error {
client := meta.(*VSphereClient).vimClient
dsID := d.Id()
ds, err := datastoreFromID(client, dsID)
if err != nil {
return fmt.Errorf("cannot find datastore: %s", err)
}
// Unmount the datastore from every host. Once the last host is unmounted we
// are done and the datastore will delete itself.
hosts := sliceInterfacesToStrings(d.Get("host_system_ids").(*schema.Set).List())
p := &nasDatastoreMountProcessor{
client: client,
oldHSIDs: hosts,
newHSIDs: nil,
volSpec: expandHostNasVolumeSpec(d),
ds: ds,
}
if err := p.processUnmountOperations(); err != nil {
return fmt.Errorf("error unmounting hosts: %s", err)
}
return nil
}
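
// resourceVSphereNasDatastoreImport imports a datastore by its managed object
// ID, validating that it exists and is NAS-backed before deferring the rest
// of the work to read on refresh.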
func resourceVSphereNasDatastoreImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) {
// We support importing a MoRef - so we need to load the datastore and check
// to make sure 1) it exists, and 2) it's a NAS datastore. If it is, we are
// good to go (the rest will be handled by read on refresh).
client := meta.(*VSphereClient).vimClient
id := d.Id()
ds, err := datastoreFromID(client, id)
if err != nil {
return nil, fmt.Errorf("cannot find datastore: %s", err)
}
props, err := datastoreProperties(ds)
if err != nil {
return nil, fmt.Errorf("could not get properties for datastore: %s", err)
}
t := types.HostFileSystemVolumeFileSystemType(props.Summary.Type)
if !isNasVolume(t) {
return nil, fmt.Errorf("datastore ID %q is not a NAS datastore", id)
}
var accessMode string
for _, hostMount := range props.Host {
switch {
case accessMode == "":
accessMode = hostMount.MountInfo.AccessMode
case accessMode != "" && accessMode != hostMount.MountInfo.AccessMode:
// We don't support selective mount modes across multiple hosts. This
// should almost never happen (there's no way to do it in the UI so it
// would need to be done manually). Nonetheless we need to fail here.
return nil, errors.New("access_mode is inconsistent across configured hosts")
}
}
d.Set("access_mode", accessMode)
d.Set("type", t)
return []*schema.ResourceData{d}, nil
}