From 8be97fba21c4934c980e6f79388003cda0d4706d Mon Sep 17 00:00:00 2001 From: Brice Figureau Date: Wed, 30 Jan 2019 16:01:22 +0100 Subject: [PATCH 01/96] Fix #73479 AWS NLB target groups missing tags `elbv2.AddTags` doesn't seem to support assigning the same set of tags to multiple resources at once leading to the following error: Error adding tags after modifying load balancer targets: "ValidationError: Only one resource can be tagged at a time" This can happen when using AWS NLB with multiple listeners pointing to different node ports. When k8s creates a NLB it creates a target group per listener along with installing security group ingress rules allowing the traffic to reach the k8s nodes. Unfortunately if those target groups are not tagged, k8s will not manage them, thinking it is not the owner. This small changes assigns tags one resource at a time instead of batching them as before. Signed-off-by: Brice Figureau --- .../providers/aws/aws_loadbalancer.go | 25 +++++++++++-------- 1 file changed, 15 insertions(+), 10 deletions(-) diff --git a/pkg/cloudprovider/providers/aws/aws_loadbalancer.go b/pkg/cloudprovider/providers/aws/aws_loadbalancer.go index f6f523254110a..d8bff9a4f6704 100644 --- a/pkg/cloudprovider/providers/aws/aws_loadbalancer.go +++ b/pkg/cloudprovider/providers/aws/aws_loadbalancer.go @@ -138,10 +138,7 @@ func (c *Cloud) ensureLoadBalancerv2(namespacedName types.NamespacedName, loadBa loadBalancer = createResponse.LoadBalancers[0] // Create Target Groups - addTagsInput := &elbv2.AddTagsInput{ - ResourceArns: []*string{}, - Tags: []*elbv2.Tag{}, - } + resourceArns := make([]*string, 0, len(mappings)) for i := range mappings { // It is easier to keep track of updates by having possibly @@ -150,20 +147,28 @@ func (c *Cloud) ensureLoadBalancerv2(namespacedName types.NamespacedName, loadBa if err != nil { return nil, fmt.Errorf("Error creating listener: %q", err) } - addTagsInput.ResourceArns = append(addTagsInput.ResourceArns, targetGroupArn) + 
resourceArns = append(resourceArns, targetGroupArn) } // Add tags to targets + targetGroupTags := make([]*elbv2.Tag, 0, len(tags)) + for k, v := range tags { - addTagsInput.Tags = append(addTagsInput.Tags, &elbv2.Tag{ + targetGroupTags = append(targetGroupTags, &elbv2.Tag{ Key: aws.String(k), Value: aws.String(v), }) } - if len(addTagsInput.ResourceArns) > 0 && len(addTagsInput.Tags) > 0 { - _, err = c.elbv2.AddTags(addTagsInput) - if err != nil { - return nil, fmt.Errorf("Error adding tags after creating Load Balancer: %q", err) + if len(resourceArns) > 0 && len(targetGroupTags) > 0 { + // elbv2.AddTags doesn't allow to tag multiple resources at once + for _, arn := range resourceArns { + _, err = c.elbv2.AddTags(&elbv2.AddTagsInput{ + ResourceArns: []*string{arn}, + Tags: targetGroupTags, + }) + if err != nil { + return nil, fmt.Errorf("Error adding tags after creating Load Balancer: %q", err) + } } } } else { From ab8b8e68eb702034ec78e8b66b0769cc94a8957c Mon Sep 17 00:00:00 2001 From: andyzhangx Date: Mon, 18 Feb 2019 05:20:35 +0000 Subject: [PATCH 02/96] remove get azure accounts in the init process set timeout for get azure account operation use const for timeout value remove get azure accounts in the init process add lock for account init --- pkg/cloudprovider/providers/azure/azure.go | 18 ++------------- .../azure/azure_blobDiskController.go | 23 +++++++++++-------- .../azure/azure_managedDiskController.go | 4 ---- 3 files changed, 15 insertions(+), 30 deletions(-) diff --git a/pkg/cloudprovider/providers/azure/azure.go b/pkg/cloudprovider/providers/azure/azure.go index ed6c9debd2d02..a1df0eb6b312e 100644 --- a/pkg/cloudprovider/providers/azure/azure.go +++ b/pkg/cloudprovider/providers/azure/azure.go @@ -457,22 +457,8 @@ func initDiskControllers(az *Cloud) error { cloud: az, } - // BlobDiskController: contains the function needed to - // create/attach/detach/delete blob based (unmanaged disks) - blobController, err := newBlobDiskController(common) - if err 
!= nil { - return fmt.Errorf("AzureDisk - failed to init Blob Disk Controller with error (%s)", err.Error()) - } - - // ManagedDiskController: contains the functions needed to - // create/attach/detach/delete managed disks - managedController, err := newManagedDiskController(common) - if err != nil { - return fmt.Errorf("AzureDisk - failed to init Managed Disk Controller with error (%s)", err.Error()) - } - - az.BlobDiskController = blobController - az.ManagedDiskController = managedController + az.BlobDiskController = &BlobDiskController{common: common} + az.ManagedDiskController = &ManagedDiskController{common: common} az.controllerCommon = common return nil diff --git a/pkg/cloudprovider/providers/azure/azure_blobDiskController.go b/pkg/cloudprovider/providers/azure/azure_blobDiskController.go index 093bfc03714c8..ae7029c9cc2b7 100644 --- a/pkg/cloudprovider/providers/azure/azure_blobDiskController.go +++ b/pkg/cloudprovider/providers/azure/azure_blobDiskController.go @@ -62,18 +62,19 @@ var ( accountsLock = &sync.Mutex{} ) -func newBlobDiskController(common *controllerCommon) (*BlobDiskController, error) { - c := BlobDiskController{common: common} +func (c *BlobDiskController) initStorageAccounts() { + accountsLock.Lock() + defer accountsLock.Unlock() - // get accounts - accounts, err := c.getAllStorageAccounts() - if err != nil { - glog.Errorf("azureDisk - getAllStorageAccounts error: %v", err) - c.accounts = make(map[string]*storageAccountState) - return &c, nil + if c.accounts == nil { + // get accounts + accounts, err := c.getAllStorageAccounts() + if err != nil { + glog.Errorf("azureDisk - getAllStorageAccounts error: %v", err) + c.accounts = make(map[string]*storageAccountState) + } + c.accounts = accounts } - c.accounts = accounts - return &c, nil } // CreateVolume creates a VHD blob in a storage account that has storageType and location using the given storage account. 
@@ -217,6 +218,8 @@ func (c *BlobDiskController) deleteVhdBlob(accountName, accountKey, blobName str func (c *BlobDiskController) CreateBlobDisk(dataDiskName string, storageAccountType storage.SkuName, sizeGB int) (string, error) { glog.V(4).Infof("azureDisk - creating blob data disk named:%s on StorageAccountType:%s", dataDiskName, storageAccountType) + c.initStorageAccounts() + storageAccountName, err := c.findSANameForDisk(storageAccountType) if err != nil { return "", err diff --git a/pkg/cloudprovider/providers/azure/azure_managedDiskController.go b/pkg/cloudprovider/providers/azure/azure_managedDiskController.go index 946314c39a345..89686b10a30b5 100644 --- a/pkg/cloudprovider/providers/azure/azure_managedDiskController.go +++ b/pkg/cloudprovider/providers/azure/azure_managedDiskController.go @@ -58,10 +58,6 @@ type ManagedDiskOptions struct { StorageAccountType storage.SkuName } -func newManagedDiskController(common *controllerCommon) (*ManagedDiskController, error) { - return &ManagedDiskController{common: common}, nil -} - //CreateManagedDisk : create managed disk func (c *ManagedDiskController) CreateManagedDisk(options *ManagedDiskOptions) (string, error) { var err error From 7fb4aaeac57f6ab0df3e129d5d9fe8ee63880cd7 Mon Sep 17 00:00:00 2001 From: andyzhangx Date: Fri, 22 Feb 2019 06:50:48 +0000 Subject: [PATCH 03/96] add timeout in GetVolumeLimits operation add timeout for getAllStorageAccounts --- .../providers/azure/azure_blobDiskController.go | 3 ++- pkg/volume/azure_dd/azure_dd.go | 5 ++++- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/pkg/cloudprovider/providers/azure/azure_blobDiskController.go b/pkg/cloudprovider/providers/azure/azure_blobDiskController.go index ae7029c9cc2b7..1212892662d09 100644 --- a/pkg/cloudprovider/providers/azure/azure_blobDiskController.go +++ b/pkg/cloudprovider/providers/azure/azure_blobDiskController.go @@ -18,6 +18,7 @@ package azure import ( "bytes" + "context" "encoding/binary" "fmt" "net/url" @@ 
-439,7 +440,7 @@ func (c *BlobDiskController) getDiskCount(SAName string) (int, error) { } func (c *BlobDiskController) getAllStorageAccounts() (map[string]*storageAccountState, error) { - ctx, cancel := getContextWithCancel() + ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second) defer cancel() accountListResult, err := c.common.cloud.StorageAccountClient.ListByResourceGroup(ctx, c.common.resourceGroup) if err != nil { diff --git a/pkg/volume/azure_dd/azure_dd.go b/pkg/volume/azure_dd/azure_dd.go index b0aac8f257b44..5dfe7ff711285 100644 --- a/pkg/volume/azure_dd/azure_dd.go +++ b/pkg/volume/azure_dd/azure_dd.go @@ -20,6 +20,7 @@ import ( "context" "fmt" "strings" + "time" "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-04-01/compute" "github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2017-10-01/storage" @@ -160,7 +161,9 @@ func (plugin *azureDataDiskPlugin) GetVolumeLimits() (map[string]int64, error) { } if vmSizeList == nil { - result, err := az.VirtualMachineSizesClient.List(context.TODO(), az.Location) + ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second) + defer cancel() + result, err := az.VirtualMachineSizesClient.List(ctx, az.Location) if err != nil || result.Value == nil { glog.Errorf("failed to list vm sizes in GetVolumeLimits, plugin.host: %s, location: %s", plugin.host.GetHostName(), az.Location) return volumeLimits, nil From 24e5e9c32bb83ea51364872d03f63fcdb9c5133b Mon Sep 17 00:00:00 2001 From: andyzhangx Date: Mon, 18 Feb 2019 08:29:37 +0000 Subject: [PATCH 04/96] add mixed protocol support for azure load balancer --- .../providers/azure/azure_loadbalancer.go | 129 ++++++++++-------- .../providers/azure/azure_standard.go | 6 +- .../providers/azure/azure_test.go | 2 +- 3 files changed, 74 insertions(+), 63 deletions(-) diff --git a/pkg/cloudprovider/providers/azure/azure_loadbalancer.go b/pkg/cloudprovider/providers/azure/azure_loadbalancer.go index 7374084b0a5b1..74394b4e4b73a 100644 --- 
a/pkg/cloudprovider/providers/azure/azure_loadbalancer.go +++ b/pkg/cloudprovider/providers/azure/azure_loadbalancer.go @@ -879,74 +879,85 @@ func (az *Cloud) reconcileLoadBalancerRule( var expectedProbes []network.Probe var expectedRules []network.LoadBalancingRule for _, port := range ports { - lbRuleName := az.getLoadBalancerRuleName(service, port, subnet(service)) - - glog.V(2).Infof("reconcileLoadBalancerRule lb name (%s) rule name (%s)", lbName, lbRuleName) - - transportProto, _, probeProto, err := getProtocolsFromKubernetesProtocol(port.Protocol) - if err != nil { - return expectedProbes, expectedRules, err + protocols := []v1.Protocol{port.Protocol} + if v, ok := service.Annotations[ServiceAnnotationLoadBalancerMixedProtocols]; ok && v == "true" { + glog.V(2).Infof("reconcileLoadBalancerRule lb name (%s) flag(%s) is set", lbName, ServiceAnnotationLoadBalancerMixedProtocols) + if port.Protocol == v1.ProtocolTCP { + protocols = append(protocols, v1.ProtocolUDP) + } else if port.Protocol == v1.ProtocolUDP { + protocols = append(protocols, v1.ProtocolTCP) + } } - if serviceapi.NeedsHealthCheck(service) { - podPresencePath, podPresencePort := serviceapi.GetServiceHealthCheckPathPort(service) + for _, protocol := range protocols { + lbRuleName := az.getLoadBalancerRuleName(service, protocol, port.Port, subnet(service)) + glog.V(2).Infof("reconcileLoadBalancerRule lb name (%s) rule name (%s)", lbName, lbRuleName) - expectedProbes = append(expectedProbes, network.Probe{ - Name: &lbRuleName, - ProbePropertiesFormat: &network.ProbePropertiesFormat{ - RequestPath: to.StringPtr(podPresencePath), - Protocol: network.ProbeProtocolHTTP, - Port: to.Int32Ptr(podPresencePort), - IntervalInSeconds: to.Int32Ptr(5), - NumberOfProbes: to.Int32Ptr(2), - }, - }) - } else if port.Protocol != v1.ProtocolUDP && port.Protocol != v1.ProtocolSCTP { - // we only add the expected probe if we're doing TCP - expectedProbes = append(expectedProbes, network.Probe{ - Name: &lbRuleName, - 
ProbePropertiesFormat: &network.ProbePropertiesFormat{ - Protocol: *probeProto, - Port: to.Int32Ptr(port.NodePort), - IntervalInSeconds: to.Int32Ptr(5), - NumberOfProbes: to.Int32Ptr(2), - }, - }) - } + transportProto, _, probeProto, err := getProtocolsFromKubernetesProtocol(protocol) + if err != nil { + return expectedProbes, expectedRules, err + } - loadDistribution := network.Default - if service.Spec.SessionAffinity == v1.ServiceAffinityClientIP { - loadDistribution = network.SourceIP - } + if serviceapi.NeedsHealthCheck(service) { + podPresencePath, podPresencePort := serviceapi.GetServiceHealthCheckPathPort(service) + + expectedProbes = append(expectedProbes, network.Probe{ + Name: &lbRuleName, + ProbePropertiesFormat: &network.ProbePropertiesFormat{ + RequestPath: to.StringPtr(podPresencePath), + Protocol: network.ProbeProtocolHTTP, + Port: to.Int32Ptr(podPresencePort), + IntervalInSeconds: to.Int32Ptr(5), + NumberOfProbes: to.Int32Ptr(2), + }, + }) + } else if protocol != v1.ProtocolUDP && protocol != v1.ProtocolSCTP { + // we only add the expected probe if we're doing TCP + expectedProbes = append(expectedProbes, network.Probe{ + Name: &lbRuleName, + ProbePropertiesFormat: &network.ProbePropertiesFormat{ + Protocol: *probeProto, + Port: to.Int32Ptr(port.NodePort), + IntervalInSeconds: to.Int32Ptr(5), + NumberOfProbes: to.Int32Ptr(2), + }, + }) + } - expectedRule := network.LoadBalancingRule{ - Name: &lbRuleName, - LoadBalancingRulePropertiesFormat: &network.LoadBalancingRulePropertiesFormat{ - Protocol: *transportProto, - FrontendIPConfiguration: &network.SubResource{ - ID: to.StringPtr(lbFrontendIPConfigID), - }, - BackendAddressPool: &network.SubResource{ - ID: to.StringPtr(lbBackendPoolID), + loadDistribution := network.Default + if service.Spec.SessionAffinity == v1.ServiceAffinityClientIP { + loadDistribution = network.SourceIP + } + + expectedRule := network.LoadBalancingRule{ + Name: &lbRuleName, + LoadBalancingRulePropertiesFormat: 
&network.LoadBalancingRulePropertiesFormat{ + Protocol: *transportProto, + FrontendIPConfiguration: &network.SubResource{ + ID: to.StringPtr(lbFrontendIPConfigID), + }, + BackendAddressPool: &network.SubResource{ + ID: to.StringPtr(lbBackendPoolID), + }, + LoadDistribution: loadDistribution, + FrontendPort: to.Int32Ptr(port.Port), + BackendPort: to.Int32Ptr(port.Port), + EnableFloatingIP: to.BoolPtr(true), }, - LoadDistribution: loadDistribution, - FrontendPort: to.Int32Ptr(port.Port), - BackendPort: to.Int32Ptr(port.Port), - EnableFloatingIP: to.BoolPtr(true), - }, - } - if port.Protocol == v1.ProtocolTCP { - expectedRule.LoadBalancingRulePropertiesFormat.IdleTimeoutInMinutes = lbIdleTimeout - } + } + if protocol == v1.ProtocolTCP { + expectedRule.LoadBalancingRulePropertiesFormat.IdleTimeoutInMinutes = lbIdleTimeout + } - // we didn't construct the probe objects for UDP or SCTP because they're not used/needed/allowed - if port.Protocol != v1.ProtocolUDP && port.Protocol != v1.ProtocolSCTP { - expectedRule.Probe = &network.SubResource{ - ID: to.StringPtr(az.getLoadBalancerProbeID(lbName, lbRuleName)), + // we didn't construct the probe objects for UDP or SCTP because they're not used/needed/allowed + if protocol != v1.ProtocolUDP && protocol != v1.ProtocolSCTP { + expectedRule.Probe = &network.SubResource{ + ID: to.StringPtr(az.getLoadBalancerProbeID(lbName, lbRuleName)), + } } - } - expectedRules = append(expectedRules, expectedRule) + expectedRules = append(expectedRules, expectedRule) + } } return expectedProbes, expectedRules, nil diff --git a/pkg/cloudprovider/providers/azure/azure_standard.go b/pkg/cloudprovider/providers/azure/azure_standard.go index 54b9e3cb11b41..56868e8b5a9fc 100644 --- a/pkg/cloudprovider/providers/azure/azure_standard.go +++ b/pkg/cloudprovider/providers/azure/azure_standard.go @@ -223,12 +223,12 @@ func getBackendPoolName(clusterName string) string { return clusterName } -func (az *Cloud) getLoadBalancerRuleName(service *v1.Service, 
port v1.ServicePort, subnetName *string) string { +func (az *Cloud) getLoadBalancerRuleName(service *v1.Service, protocol v1.Protocol, port int32, subnetName *string) string { prefix := az.getRulePrefix(service) if subnetName == nil { - return fmt.Sprintf("%s-%s-%d", prefix, port.Protocol, port.Port) + return fmt.Sprintf("%s-%s-%d", prefix, protocol, port) } - return fmt.Sprintf("%s-%s-%s-%d", prefix, *subnetName, port.Protocol, port.Port) + return fmt.Sprintf("%s-%s-%s-%d", prefix, *subnetName, protocol, port) } func (az *Cloud) getSecurityRuleName(service *v1.Service, port v1.ServicePort, sourceAddrPrefix string) string { diff --git a/pkg/cloudprovider/providers/azure/azure_test.go b/pkg/cloudprovider/providers/azure/azure_test.go index e4e60bc6962f7..68c79bbb04a59 100644 --- a/pkg/cloudprovider/providers/azure/azure_test.go +++ b/pkg/cloudprovider/providers/azure/azure_test.go @@ -1210,7 +1210,7 @@ func validateLoadBalancer(t *testing.T, loadBalancer *network.LoadBalancer, serv } for _, wantedRule := range svc.Spec.Ports { expectedRuleCount++ - wantedRuleName := az.getLoadBalancerRuleName(&svc, wantedRule, subnet(&svc)) + wantedRuleName := az.getLoadBalancerRuleName(&svc, wantedRule.Protocol, wantedRule.Port, subnet(&svc)) foundRule := false for _, actualRule := range *loadBalancer.LoadBalancingRules { if strings.EqualFold(*actualRule.Name, wantedRuleName) && From 00619b1cdeabbd92a72f37b1f8f60da2ed30b67e Mon Sep 17 00:00:00 2001 From: Minhan Xia Date: Tue, 12 Feb 2019 11:31:34 -0800 Subject: [PATCH 05/96] record event on endpoint update failure --- pkg/controller/endpoint/BUILD | 3 +++ .../endpoint/endpoints_controller.go | 21 ++++++++++++++++++- 2 files changed, 23 insertions(+), 1 deletion(-) diff --git a/pkg/controller/endpoint/BUILD b/pkg/controller/endpoint/BUILD index 400da5a208659..06ea49909de0c 100644 --- a/pkg/controller/endpoint/BUILD +++ b/pkg/controller/endpoint/BUILD @@ -29,9 +29,12 @@ go_library( 
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library", "//staging/src/k8s.io/client-go/informers/core/v1:go_default_library", "//staging/src/k8s.io/client-go/kubernetes:go_default_library", + "//staging/src/k8s.io/client-go/kubernetes/scheme:go_default_library", + "//staging/src/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library", "//staging/src/k8s.io/client-go/listers/core/v1:go_default_library", "//staging/src/k8s.io/client-go/tools/cache:go_default_library", "//staging/src/k8s.io/client-go/tools/leaderelection/resourcelock:go_default_library", + "//staging/src/k8s.io/client-go/tools/record:go_default_library", "//staging/src/k8s.io/client-go/util/workqueue:go_default_library", "//vendor/github.com/golang/glog:go_default_library", ], diff --git a/pkg/controller/endpoint/endpoints_controller.go b/pkg/controller/endpoint/endpoints_controller.go index 61c0f9205478d..3a6c976f0556f 100644 --- a/pkg/controller/endpoint/endpoints_controller.go +++ b/pkg/controller/endpoint/endpoints_controller.go @@ -32,9 +32,12 @@ import ( "k8s.io/apimachinery/pkg/util/wait" coreinformers "k8s.io/client-go/informers/core/v1" clientset "k8s.io/client-go/kubernetes" + "k8s.io/client-go/kubernetes/scheme" + v1core "k8s.io/client-go/kubernetes/typed/core/v1" corelisters "k8s.io/client-go/listers/core/v1" "k8s.io/client-go/tools/cache" "k8s.io/client-go/tools/leaderelection/resourcelock" + "k8s.io/client-go/tools/record" "k8s.io/client-go/util/workqueue" "k8s.io/kubernetes/pkg/api/v1/endpoints" podutil "k8s.io/kubernetes/pkg/api/v1/pod" @@ -71,6 +74,11 @@ const ( // NewEndpointController returns a new *EndpointController. 
func NewEndpointController(podInformer coreinformers.PodInformer, serviceInformer coreinformers.ServiceInformer, endpointsInformer coreinformers.EndpointsInformer, client clientset.Interface) *EndpointController { + broadcaster := record.NewBroadcaster() + broadcaster.StartLogging(glog.Infof) + broadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: client.CoreV1().Events("")}) + recorder := broadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "endpoint-controller"}) + if client != nil && client.CoreV1().RESTClient().GetRateLimiter() != nil { metrics.RegisterMetricAndTrackRateLimiterUsage("endpoint_controller", client.CoreV1().RESTClient().GetRateLimiter()) } @@ -101,12 +109,16 @@ func NewEndpointController(podInformer coreinformers.PodInformer, serviceInforme e.endpointsLister = endpointsInformer.Lister() e.endpointsSynced = endpointsInformer.Informer().HasSynced + e.eventBroadcaster = broadcaster + e.eventRecorder = recorder return e } // EndpointController manages selector-based service endpoints. type EndpointController struct { - client clientset.Interface + client clientset.Interface + eventBroadcaster record.EventBroadcaster + eventRecorder record.EventRecorder // serviceLister is able to list/get services and is populated by the shared informer passed to // NewEndpointController. @@ -522,6 +534,13 @@ func (e *EndpointController) syncService(key string) error { // Given the frequency of 1, we log at a lower level. 
glog.V(5).Infof("Forbidden from creating endpoints: %v", err) } + + if createEndpoints { + e.eventRecorder.Eventf(newEndpoints, v1.EventTypeWarning, "FailedToCreateEndpoint", "Failed to create endpoint for service %v/%v: %v", service.Namespace, service.Name, err) + } else { + e.eventRecorder.Eventf(newEndpoints, v1.EventTypeWarning, "FailedToUpdateEndpoint", "Failed to update endpoint %v/%v: %v", service.Namespace, service.Name, err) + } + return err } return nil From ae659f83ea7ad3642beece92ae14160625b36bf9 Mon Sep 17 00:00:00 2001 From: andyzhangx Date: Mon, 25 Feb 2019 07:02:35 +0000 Subject: [PATCH 06/96] fix parse devicePath issue on Azure Disk --- pkg/volume/azure_dd/attacher.go | 17 ++++++++++------- 1 file changed, 10 insertions(+), 7 deletions(-) diff --git a/pkg/volume/azure_dd/attacher.go b/pkg/volume/azure_dd/attacher.go index 3d38881ee1a3b..f446201acbb41 100644 --- a/pkg/volume/azure_dd/attacher.go +++ b/pkg/volume/azure_dd/attacher.go @@ -165,8 +165,16 @@ func (a *azureDiskAttacher) WaitForAttach(spec *volume.Spec, devicePath string, nodeName := types.NodeName(a.plugin.host.GetHostName()) diskName := volumeSource.DiskName - var lun int32 - if runtime.GOOS == "windows" { + lun := int32(-1) + if runtime.GOOS != "windows" { + // on Linux, usually devicePath is like "/dev/disk/azure/scsi1/lun2", get LUN directly + lun, err = getDiskLUN(devicePath) + if err != nil { + glog.V(2).Infof("azureDisk - WaitForAttach: getDiskLUN(%s) failed with error: %v", devicePath, err) + } + } + + if lun < 0 { glog.V(2).Infof("azureDisk - WaitForAttach: begin to GetDiskLun by diskName(%s), DataDiskURI(%s), nodeName(%s), devicePath(%s)", diskName, volumeSource.DataDiskURI, nodeName, devicePath) lun, err = diskController.GetDiskLun(diskName, volumeSource.DataDiskURI, nodeName) @@ -174,11 +182,6 @@ func (a *azureDiskAttacher) WaitForAttach(spec *volume.Spec, devicePath string, return "", err } glog.V(2).Infof("azureDisk - WaitForAttach: GetDiskLun succeeded, got lun(%v)", lun) - 
} else { - lun, err = getDiskLUN(devicePath) - if err != nil { - return "", err - } } exec := a.plugin.host.GetExec(a.plugin.GetPluginName()) From 73c3fc040daf7785b4d648c818307c15aba30cb7 Mon Sep 17 00:00:00 2001 From: Jan Safranek Date: Wed, 20 Feb 2019 17:17:01 +0100 Subject: [PATCH 07/96] Fix scanning of failed targets If a iSCSI target is down while a volume is attached, reading from /sys/class/iscsi_host/host415/device/session383/connection383:0/iscsi_connection/connection383:0/address fails with an error. Kubelet should assume that such target is not available / logged in and try to relogin. Eventually, if such error persists, it should continue mounting the volume if the other paths are healthy instead of failing whole WaitForAttach(). --- pkg/volume/util/device_util_linux.go | 18 ++++++++++++------ 1 file changed, 12 insertions(+), 6 deletions(-) diff --git a/pkg/volume/util/device_util_linux.go b/pkg/volume/util/device_util_linux.go index 2d1f43c83b000..8d0b4fa9fb36b 100644 --- a/pkg/volume/util/device_util_linux.go +++ b/pkg/volume/util/device_util_linux.go @@ -135,7 +135,8 @@ func (handler *deviceHandler) GetISCSIPortalHostMapForTarget(targetIqn string) ( targetNamePath := sessionPath + "/iscsi_session/" + sessionName + "/targetname" targetName, err := io.ReadFile(targetNamePath) if err != nil { - return nil, err + glog.Infof("Failed to process session %s, assuming this session is unavailable: %s", sessionName, err) + continue } // Ignore hosts that don't matchthe target we were looking for. @@ -147,7 +148,8 @@ func (handler *deviceHandler) GetISCSIPortalHostMapForTarget(targetIqn string) ( // for the iSCSI connection. 
dirs2, err := io.ReadDir(sessionPath) if err != nil { - return nil, err + glog.Infof("Failed to process session %s, assuming this session is unavailable: %s", sessionName, err) + continue } for _, dir2 := range dirs2 { // Skip over files that aren't the connection @@ -164,25 +166,29 @@ func (handler *deviceHandler) GetISCSIPortalHostMapForTarget(targetIqn string) ( addrPath := connectionPath + "/address" addr, err := io.ReadFile(addrPath) if err != nil { - return nil, err + glog.Infof("Failed to process connection %s, assuming this connection is unavailable: %s", dirName, err) + continue } portPath := connectionPath + "/port" port, err := io.ReadFile(portPath) if err != nil { - return nil, err + glog.Infof("Failed to process connection %s, assuming this connection is unavailable: %s", dirName, err) + continue } persistentAddrPath := connectionPath + "/persistent_address" persistentAddr, err := io.ReadFile(persistentAddrPath) if err != nil { - return nil, err + glog.Infof("Failed to process connection %s, assuming this connection is unavailable: %s", dirName, err) + continue } persistentPortPath := connectionPath + "/persistent_port" persistentPort, err := io.ReadFile(persistentPortPath) if err != nil { - return nil, err + glog.Infof("Failed to process connection %s, assuming this connection is unavailable: %s", dirName, err) + continue } // Add entries to the map for both the current and persistent portals From ed93d8a99b37409834bf1303a2c5d6903f60e8c2 Mon Sep 17 00:00:00 2001 From: Anago GCB Date: Tue, 26 Feb 2019 12:48:35 +0000 Subject: [PATCH 08/96] Kubernetes version v1.12.7-beta.0 openapi-spec file updates --- api/openapi-spec/swagger.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/api/openapi-spec/swagger.json b/api/openapi-spec/swagger.json index 370d361b1d81e..c3197d70b265b 100644 --- a/api/openapi-spec/swagger.json +++ b/api/openapi-spec/swagger.json @@ -2,7 +2,7 @@ "swagger": "2.0", "info": { "title": "Kubernetes", - "version": 
"v1.12.6" + "version": "v1.12.7" }, "paths": { "/api/": { From 46edb78c2eb8ac5529c6a68d861c68c645eb475c Mon Sep 17 00:00:00 2001 From: andyzhangx Date: Fri, 22 Feb 2019 05:13:16 +0000 Subject: [PATCH 09/96] add retry for detach azure disk add more logging info in detach disk add more logging for azure disk attach/detach --- .../azure/azure_controller_standard.go | 25 +++++++++++++------ .../providers/azure/azure_controller_vmss.go | 22 ++++++++++------ 2 files changed, 32 insertions(+), 15 deletions(-) diff --git a/pkg/cloudprovider/providers/azure/azure_controller_standard.go b/pkg/cloudprovider/providers/azure/azure_controller_standard.go index f665e4b94bdae..604cfa5d4eb67 100644 --- a/pkg/cloudprovider/providers/azure/azure_controller_standard.go +++ b/pkg/cloudprovider/providers/azure/azure_controller_standard.go @@ -74,7 +74,7 @@ func (as *availabilitySet) AttachDisk(isManagedDisk bool, diskName, diskURI stri }, }, } - glog.V(2).Infof("azureDisk - update(%s): vm(%s) - attach disk(%s)", nodeResourceGroup, vmName, diskName) + glog.V(2).Infof("azureDisk - update(%s): vm(%s) - attach disk(%s, %s)", nodeResourceGroup, vmName, diskName, diskURI) ctx, cancel := getContextWithCancel() defer cancel() @@ -83,15 +83,15 @@ func (as *availabilitySet) AttachDisk(isManagedDisk bool, diskName, diskURI stri _, err = as.VirtualMachinesClient.CreateOrUpdate(ctx, nodeResourceGroup, vmName, newVM) if err != nil { - glog.Errorf("azureDisk - attach disk(%s) failed, err: %v", diskName, err) + glog.Errorf("azureDisk - attach disk(%s, %s) failed, err: %v", diskName, diskURI, err) detail := err.Error() if strings.Contains(detail, errLeaseFailed) || strings.Contains(detail, errDiskBlobNotFound) { // if lease cannot be acquired or disk not found, immediately detach the disk and return the original error - glog.V(2).Infof("azureDisk - err %v, try detach disk(%s)", err, diskName) + glog.V(2).Infof("azureDisk - err %v, try detach disk(%s, %s)", err, diskName, diskURI) 
as.DetachDiskByName(diskName, diskURI, nodeName) } } else { - glog.V(2).Infof("azureDisk - attach disk(%s) succeeded", diskName) + glog.V(2).Infof("azureDisk - attach disk(%s, %s) succeeded", diskName, diskURI) } return err } @@ -139,19 +139,28 @@ func (as *availabilitySet) DetachDiskByName(diskName, diskURI string, nodeName t }, }, } - glog.V(2).Infof("azureDisk - update(%s): vm(%s) - detach disk(%s)", nodeResourceGroup, vmName, diskName) + glog.V(2).Infof("azureDisk - update(%s): vm(%s) - detach disk(%s, %s)", nodeResourceGroup, vmName, diskName, diskURI) ctx, cancel := getContextWithCancel() defer cancel() // Invalidate the cache right after updating defer as.cloud.vmCache.Delete(vmName) - _, err = as.VirtualMachinesClient.CreateOrUpdate(ctx, nodeResourceGroup, vmName, newVM) + resp, err := as.VirtualMachinesClient.CreateOrUpdate(ctx, nodeResourceGroup, vmName, newVM) + if as.CloudProviderBackoff && shouldRetryHTTPRequest(resp, err) { + glog.V(2).Infof("azureDisk - update(%s) backing off: vm(%s) detach disk(%s, %s), err: %v", nodeResourceGroup, vmName, diskName, diskURI, err) + retryErr := as.CreateOrUpdateVMWithRetry(nodeResourceGroup, vmName, newVM) + if retryErr != nil { + err = retryErr + glog.V(2).Infof("azureDisk - update(%s) abort backoff: vm(%s) detach disk(%s, %s), err: %v", nodeResourceGroup, vmName, diskName, diskURI, err) + } + } if err != nil { - glog.Errorf("azureDisk - detach disk(%s) failed, err: %v", diskName, err) + glog.Errorf("azureDisk - detach disk(%s, %s) failed, err: %v", diskName, diskURI, err) } else { - glog.V(2).Infof("azureDisk - detach disk(%s) succeeded", diskName) + glog.V(2).Infof("azureDisk - detach disk(%s, %s) succeeded", diskName, diskURI) } + return err } diff --git a/pkg/cloudprovider/providers/azure/azure_controller_vmss.go b/pkg/cloudprovider/providers/azure/azure_controller_vmss.go index a804b4861a203..219f9a4d87adf 100644 --- a/pkg/cloudprovider/providers/azure/azure_controller_vmss.go +++ 
b/pkg/cloudprovider/providers/azure/azure_controller_vmss.go @@ -86,17 +86,17 @@ func (ss *scaleSet) AttachDisk(isManagedDisk bool, diskName, diskURI string, nod key := buildVmssCacheKey(nodeResourceGroup, ss.makeVmssVMName(ssName, instanceID)) defer ss.vmssVMCache.Delete(key) - glog.V(2).Infof("azureDisk - update(%s): vm(%s) - attach disk(%s)", nodeResourceGroup, nodeName, diskName) + glog.V(2).Infof("azureDisk - update(%s): vm(%s) - attach disk(%s, %s)", nodeResourceGroup, nodeName, diskName, diskURI) _, err = ss.VirtualMachineScaleSetVMsClient.Update(ctx, nodeResourceGroup, ssName, instanceID, newVM) if err != nil { detail := err.Error() if strings.Contains(detail, errLeaseFailed) || strings.Contains(detail, errDiskBlobNotFound) { // if lease cannot be acquired or disk not found, immediately detach the disk and return the original error - glog.Infof("azureDisk - err %s, try detach disk(%s)", detail, diskName) + glog.Infof("azureDisk - err %s, try detach disk(%s, %s)", detail, diskName, diskURI) ss.DetachDiskByName(diskName, diskURI, nodeName) } } else { - glog.V(2).Infof("azureDisk - attach disk(%s) succeeded", diskName) + glog.V(2).Infof("azureDisk - attach disk(%s, %s) succeeded", diskName, diskURI) } return err } @@ -155,12 +155,20 @@ func (ss *scaleSet) DetachDiskByName(diskName, diskURI string, nodeName types.No key := buildVmssCacheKey(nodeResourceGroup, ss.makeVmssVMName(ssName, instanceID)) defer ss.vmssVMCache.Delete(key) - glog.V(2).Infof("azureDisk - update(%s): vm(%s) - detach disk(%s)", nodeResourceGroup, nodeName, diskName) - _, err = ss.VirtualMachineScaleSetVMsClient.Update(ctx, nodeResourceGroup, ssName, instanceID, newVM) + glog.V(2).Infof("azureDisk - update(%s): vm(%s) - detach disk(%s, %s)", nodeResourceGroup, nodeName, diskName, diskURI) + resp, err := ss.VirtualMachineScaleSetVMsClient.Update(ctx, nodeResourceGroup, ssName, instanceID, newVM) + if ss.CloudProviderBackoff && shouldRetryHTTPRequest(resp, err) { + glog.V(2).Infof("azureDisk - 
update(%s) backing off: vm(%s) detach disk(%s, %s), err: %v", nodeResourceGroup, nodeName, diskName, diskURI, err) + retryErr := ss.UpdateVmssVMWithRetry(ctx, nodeResourceGroup, ssName, instanceID, newVM) + if retryErr != nil { + err = retryErr + glog.V(2).Infof("azureDisk - update(%s) abort backoff: vm(%s) detach disk(%s, %s), err: %v", nodeResourceGroup, nodeName, diskName, diskURI, err) + } + } if err != nil { - glog.Errorf("azureDisk - detach disk(%s) from %s failed, err: %v", diskName, nodeName, err) + glog.Errorf("azureDisk - detach disk(%s, %s) from %s failed, err: %v", diskName, diskURI, nodeName, err) } else { - glog.V(2).Infof("azureDisk - detach disk(%s) succeeded", diskName) + glog.V(2).Infof("azureDisk - detach disk(%s, %s) succeeded", diskName, diskURI) } return err From 721f6d1d9a898a40a20d03903ae1aa1836689fd1 Mon Sep 17 00:00:00 2001 From: Anago GCB Date: Tue, 26 Feb 2019 14:13:42 +0000 Subject: [PATCH 10/96] Add/Update CHANGELOG-1.12.md for v1.12.6. --- CHANGELOG-1.12.md | 182 +++++++++++++++++++++++++++++++++------------- 1 file changed, 131 insertions(+), 51 deletions(-) diff --git a/CHANGELOG-1.12.md b/CHANGELOG-1.12.md index a0ab2e1e4d45b..46619f99e70a6 100644 --- a/CHANGELOG-1.12.md +++ b/CHANGELOG-1.12.md @@ -1,45 +1,52 @@ -- [v1.12.5](#v1125) - - [Downloads for v1.12.5](#downloads-for-v1125) +- [v1.12.6](#v1126) + - [Downloads for v1.12.6](#downloads-for-v1126) - [Client Binaries](#client-binaries) - [Server Binaries](#server-binaries) - [Node Binaries](#node-binaries) - - [Changelog since v1.12.4](#changelog-since-v1124) + - [Changelog since v1.12.5](#changelog-since-v1125) - [Other notable changes](#other-notable-changes) -- [v1.12.4](#v1124) - - [Downloads for v1.12.4](#downloads-for-v1124) +- [v1.12.5](#v1125) + - [Downloads for v1.12.5](#downloads-for-v1125) - [Client Binaries](#client-binaries-1) - [Server Binaries](#server-binaries-1) - [Node Binaries](#node-binaries-1) - - [Changelog since v1.12.3](#changelog-since-v1123) - - [Action 
Required](#action-required) + - [Changelog since v1.12.4](#changelog-since-v1124) - [Other notable changes](#other-notable-changes-1) -- [v1.12.3](#v1123) - - [Downloads for v1.12.3](#downloads-for-v1123) +- [v1.12.4](#v1124) + - [Downloads for v1.12.4](#downloads-for-v1124) - [Client Binaries](#client-binaries-2) - [Server Binaries](#server-binaries-2) - [Node Binaries](#node-binaries-2) - - [Changelog since v1.12.2](#changelog-since-v1122) + - [Changelog since v1.12.3](#changelog-since-v1123) + - [Action Required](#action-required) - [Other notable changes](#other-notable-changes-2) -- [v1.12.2](#v1122) - - [Downloads for v1.12.2](#downloads-for-v1122) +- [v1.12.3](#v1123) + - [Downloads for v1.12.3](#downloads-for-v1123) - [Client Binaries](#client-binaries-3) - [Server Binaries](#server-binaries-3) - [Node Binaries](#node-binaries-3) - - [Changelog since v1.12.1](#changelog-since-v1121) + - [Changelog since v1.12.2](#changelog-since-v1122) - [Other notable changes](#other-notable-changes-3) -- [v1.12.1](#v1121) - - [Downloads for v1.12.1](#downloads-for-v1121) +- [v1.12.2](#v1122) + - [Downloads for v1.12.2](#downloads-for-v1122) - [Client Binaries](#client-binaries-4) - [Server Binaries](#server-binaries-4) - [Node Binaries](#node-binaries-4) - - [Changelog since v1.12.0](#changelog-since-v1120) + - [Changelog since v1.12.1](#changelog-since-v1121) - [Other notable changes](#other-notable-changes-4) -- [v1.12.0](#v1120) - - [Downloads for v1.12.0](#downloads-for-v1120) +- [v1.12.1](#v1121) + - [Downloads for v1.12.1](#downloads-for-v1121) - [Client Binaries](#client-binaries-5) - [Server Binaries](#server-binaries-5) - [Node Binaries](#node-binaries-5) + - [Changelog since v1.12.0](#changelog-since-v1120) + - [Other notable changes](#other-notable-changes-5) +- [v1.12.0](#v1120) + - [Downloads for v1.12.0](#downloads-for-v1120) + - [Client Binaries](#client-binaries-6) + - [Server Binaries](#server-binaries-6) + - [Node Binaries](#node-binaries-6) - [Known 
Issues](#known-issues) - [Major Themes](#major-themes) - [SIG API Machinery](#sig-api-machinery) @@ -61,7 +68,7 @@ - [Deprecations and removals](#deprecations-and-removals) - [New Features](#new-features) - [API Changes](#api-changes) - - [Other Notable Changes](#other-notable-changes-5) + - [Other Notable Changes](#other-notable-changes-6) - [SIG API Machinery](#sig-api-machinery-1) - [SIG Apps](#sig-apps) - [SIG Auth](#sig-auth) @@ -80,54 +87,127 @@ - [SIG Storage](#sig-storage-1) - [SIG VMWare](#sig-vmware-1) - [SIG Windows](#sig-windows-1) - - [Other Notable Changes](#other-notable-changes-6) + - [Other Notable Changes](#other-notable-changes-7) - [Bug Fixes](#bug-fixes) - [Not Very Notable (that is, non-user-facing)](#not-very-notable-that-is-non-user-facing) - [External Dependencies](#external-dependencies) - [v1.12.0-rc.2](#v1120-rc2) - [Downloads for v1.12.0-rc.2](#downloads-for-v1120-rc2) - - [Client Binaries](#client-binaries-6) - - [Server Binaries](#server-binaries-6) - - [Node Binaries](#node-binaries-6) - - [Changelog since v1.12.0-rc.1](#changelog-since-v1120-rc1) - - [Other notable changes](#other-notable-changes-7) -- [v1.12.0-rc.1](#v1120-rc1) - - [Downloads for v1.12.0-rc.1](#downloads-for-v1120-rc1) - [Client Binaries](#client-binaries-7) - [Server Binaries](#server-binaries-7) - [Node Binaries](#node-binaries-7) - - [Changelog since v1.12.0-beta.2](#changelog-since-v1120-beta2) - - [Action Required](#action-required-2) + - [Changelog since v1.12.0-rc.1](#changelog-since-v1120-rc1) - [Other notable changes](#other-notable-changes-8) -- [v1.12.0-beta.2](#v1120-beta2) - - [Downloads for v1.12.0-beta.2](#downloads-for-v1120-beta2) +- [v1.12.0-rc.1](#v1120-rc1) + - [Downloads for v1.12.0-rc.1](#downloads-for-v1120-rc1) - [Client Binaries](#client-binaries-8) - [Server Binaries](#server-binaries-8) - [Node Binaries](#node-binaries-8) - - [Changelog since v1.12.0-beta.1](#changelog-since-v1120-beta1) - - [Action Required](#action-required-3) + - 
[Changelog since v1.12.0-beta.2](#changelog-since-v1120-beta2) + - [Action Required](#action-required-2) - [Other notable changes](#other-notable-changes-9) -- [v1.12.0-beta.1](#v1120-beta1) - - [Downloads for v1.12.0-beta.1](#downloads-for-v1120-beta1) +- [v1.12.0-beta.2](#v1120-beta2) + - [Downloads for v1.12.0-beta.2](#downloads-for-v1120-beta2) - [Client Binaries](#client-binaries-9) - [Server Binaries](#server-binaries-9) - [Node Binaries](#node-binaries-9) - - [Changelog since v1.12.0-alpha.1](#changelog-since-v1120-alpha1) - - [Action Required](#action-required-4) + - [Changelog since v1.12.0-beta.1](#changelog-since-v1120-beta1) + - [Action Required](#action-required-3) - [Other notable changes](#other-notable-changes-10) -- [v1.12.0-alpha.1](#v1120-alpha1) - - [Downloads for v1.12.0-alpha.1](#downloads-for-v1120-alpha1) +- [v1.12.0-beta.1](#v1120-beta1) + - [Downloads for v1.12.0-beta.1](#downloads-for-v1120-beta1) - [Client Binaries](#client-binaries-10) - [Server Binaries](#server-binaries-10) - [Node Binaries](#node-binaries-10) + - [Changelog since v1.12.0-alpha.1](#changelog-since-v1120-alpha1) + - [Action Required](#action-required-4) + - [Other notable changes](#other-notable-changes-11) +- [v1.12.0-alpha.1](#v1120-alpha1) + - [Downloads for v1.12.0-alpha.1](#downloads-for-v1120-alpha1) + - [Client Binaries](#client-binaries-11) + - [Server Binaries](#server-binaries-11) + - [Node Binaries](#node-binaries-11) - [Changelog since v1.11.0](#changelog-since-v1110) - [Action Required](#action-required-5) - - [Other notable changes](#other-notable-changes-11) + - [Other notable changes](#other-notable-changes-12) +# v1.12.6 + +[Documentation](https://docs.k8s.io) + +## Downloads for v1.12.6 + + +filename | sha512 hash +-------- | ----------- +[kubernetes.tar.gz](https://dl.k8s.io/v1.12.6/kubernetes.tar.gz) | `22868d7e1e381944e005ff28de4de2a5bf85047dc724a2e59ee5bf9adf11519c0f619f18523bb317474791514d3d5530ce65268cd3bafb8bd3427f10f31f6875` 
+[kubernetes-src.tar.gz](https://dl.k8s.io/v1.12.6/kubernetes-src.tar.gz) | `a694b53e13d7d559750ca6e4a3274591b30dabe9f5735e60a46e37540fde44d2c965489e52c6dabbf1ad316bb334da6dd2170c5cbf7a0e4cf4fc936368b13a61` + +### Client Binaries + +filename | sha512 hash +-------- | ----------- +[kubernetes-client-darwin-386.tar.gz](https://dl.k8s.io/v1.12.6/kubernetes-client-darwin-386.tar.gz) | `d6ff2bf02a67e081f1813d63ca32d2891d9acc280d5874de16c262a6eca94e4dadddad08330cb3b87d1241b8abddc279126fbc3eb5fffe75de4afca8e2f27e59` +[kubernetes-client-darwin-amd64.tar.gz](https://dl.k8s.io/v1.12.6/kubernetes-client-darwin-amd64.tar.gz) | `87048a989ce273199059d726a61ca67e09521b696e535db983c65847673c7000e32618e8a1b46d2e008dece9cd79bee3e27536ac86b6d2b9c5f8e5df93b0f9ac` +[kubernetes-client-linux-386.tar.gz](https://dl.k8s.io/v1.12.6/kubernetes-client-linux-386.tar.gz) | `485c642f9b73fc1ccff7b33f764e13cb85a12c3f0e0ab6c90ac38ad265d13bf45f02615c8649ca5820798c32640f4129a79e80e2b3a78085897fbdc4926b8a33` +[kubernetes-client-linux-amd64.tar.gz](https://dl.k8s.io/v1.12.6/kubernetes-client-linux-amd64.tar.gz) | `230946db5a8b2dd835b61119c6527206c6387ed776d55b6ddd4a67953bc99f0ad11b80a40f1d4393a581dbc4639a440553a7e569c6377891d6e0ed4b1848b538` +[kubernetes-client-linux-arm.tar.gz](https://dl.k8s.io/v1.12.6/kubernetes-client-linux-arm.tar.gz) | `791e9d1c21333f626241a0975d5dd88a989e8d7498f48906616f43f9a566af8230e3a82565972c7eb20e4a7747402d4cf62aab54a674886e454320708aff4324` +[kubernetes-client-linux-arm64.tar.gz](https://dl.k8s.io/v1.12.6/kubernetes-client-linux-arm64.tar.gz) | `05e7c47d64a9d2bd249c9f5059b9d1fafd30e6233f14dba0313faa01a765cb5e3d5abc095affae7b638f6d6d9bb5166020b394b6bf3af4cbb1dc5bf10244f6ee` +[kubernetes-client-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.12.6/kubernetes-client-linux-ppc64le.tar.gz) | `e2ad0edc976a6a276c736885662caae427f7cef11ccd7a0c923240d4d659b13f968644f3df6c9c343039c1128703644911cd6de2786be83378d4c825ec823abf` 
+[kubernetes-client-linux-s390x.tar.gz](https://dl.k8s.io/v1.12.6/kubernetes-client-linux-s390x.tar.gz) | `5f1dbe3b4ddb956b287cda6e9d61f90e0f795ea8400d29fb3b9e5d0422f12d3a584db9319bbf70db3935ae943b0a2e676efb6f571ac9a385d9ee1fe03d51d2b9` +[kubernetes-client-windows-386.tar.gz](https://dl.k8s.io/v1.12.6/kubernetes-client-windows-386.tar.gz) | `9aae0dc316ae765b5b5c9d62ba82b9f36556bbe170574437f750c856d35232a36f6aa9575949f1a80bc4c11c659e556dd3acc0c152f7e9d551423af87afa6453` +[kubernetes-client-windows-amd64.tar.gz](https://dl.k8s.io/v1.12.6/kubernetes-client-windows-amd64.tar.gz) | `28787f5604287b3e00301c6d1c41f7f388fdff32f0b241f956f3f5b5ef56f06d40f62f3368229247121ad132368dcd9d626308230524b304bab4d3a2de4fc8ef` + +### Server Binaries + +filename | sha512 hash +-------- | ----------- +[kubernetes-server-linux-amd64.tar.gz](https://dl.k8s.io/v1.12.6/kubernetes-server-linux-amd64.tar.gz) | `6854aa0a35a952f3906a62a532ca96394e927e96751a5796927afacccf5c9ebc01c19d5e69dfc80f790af6e9ec0fce2559ce6304ac71dda0c7869291da7f3e27` +[kubernetes-server-linux-arm.tar.gz](https://dl.k8s.io/v1.12.6/kubernetes-server-linux-arm.tar.gz) | `d4b16aba17b704efe27e28c813c4577a046846794751032b740ed30975d648298ce886b2a2ca37d83ee280bf7cafe6c60a0cbb4690177f50297b6223e3cc6774` +[kubernetes-server-linux-arm64.tar.gz](https://dl.k8s.io/v1.12.6/kubernetes-server-linux-arm64.tar.gz) | `20b806b8360256d305ebf1187e024d555d9267f9081e83e47de179ccdb27d21e2f03d90dad92a598d3d1229b23d8f8599a90858c1408881a9d768c4ccc8246dc` +[kubernetes-server-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.12.6/kubernetes-server-linux-ppc64le.tar.gz) | `5d4ac12377e3cc236d193566c1a35b3e5a7bc61f78475ba7151285cb920db09c888b33fab529f91bc620fa210554f52e69d14371c0cc1ab95b9ed817c3c56df5` +[kubernetes-server-linux-s390x.tar.gz](https://dl.k8s.io/v1.12.6/kubernetes-server-linux-s390x.tar.gz) | `83acc09af047d63ce33c84e9d29efddea49911b7f111f10be5b13d3f63ea72acf258553137b3ca06992a4b77afcee38b085dd6a6e8aa748ac033e9b57120f452` + +### Node Binaries + 
+filename | sha512 hash +-------- | ----------- +[kubernetes-node-linux-amd64.tar.gz](https://dl.k8s.io/v1.12.6/kubernetes-node-linux-amd64.tar.gz) | `0d0bca352f912c5b1a928fc8bcfa79909c54c394a87df7ede3a5a549fed91129997e2266ecb11c2f405d162a9d1e468f8b8ddef73c79aaa3f2ee911aa02c1782` +[kubernetes-node-linux-arm.tar.gz](https://dl.k8s.io/v1.12.6/kubernetes-node-linux-arm.tar.gz) | `be7d4db5155579f06592a93c567b9973016a548140a4754e0358c4b052410bcc34d64097b2926ba7edd3faf720b4785b169b68af666978b1b32946f8474c55f4` +[kubernetes-node-linux-arm64.tar.gz](https://dl.k8s.io/v1.12.6/kubernetes-node-linux-arm64.tar.gz) | `1834a69e0c45029d9ce5e1e489c3b414b89239c3c891a9ef678aeabe634d4d2bdea5e756a0fa199391a8bd3e620ae9100eb6e6003a76b1f7eeb24498bfaf7b1c` +[kubernetes-node-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.12.6/kubernetes-node-linux-ppc64le.tar.gz) | `3bd9a5ebe63b91a6bb17f14ef5a65f30d9d386f3bb7b64c5ea1d9a25d4df41e07e3494d8bf7c6f36f11df10f838302b83be01e2fe1de36c3876a4443da6aaf1d` +[kubernetes-node-linux-s390x.tar.gz](https://dl.k8s.io/v1.12.6/kubernetes-node-linux-s390x.tar.gz) | `900f8ce043f524a5be6db9fe2d1726df4679519732d5b70a247d28e76c91df3f1100f92d8fbfdd89094a1fe79dc28645625aba0378bc88781f96dc8baf9a9396` +[kubernetes-node-windows-amd64.tar.gz](https://dl.k8s.io/v1.12.6/kubernetes-node-windows-amd64.tar.gz) | `1426234cd069c0cd51dea51fb1de5f1069c5defbc92cb68eebe6781095b2393477c4f85297f7c2361a4eab6a64e0d0281f3fabeafc764d1e90225c225f3e210c` + +## Changelog since v1.12.5 + +### Other notable changes + +* kubeadm: fixed nil pointer dereference caused by a bug in url parsing ([#74454](https://github.com/kubernetes/kubernetes/pull/74454), [@bart0sh](https://github.com/bart0sh)) +* kube-apiserver: a request body of a CREATE/UPDATE/PATCH/DELETE resource operation larger than 100 MB will return a 413 "request entity too large" error. 
([#73805](https://github.com/kubernetes/kubernetes/pull/73805), [@caesarxuchao](https://github.com/caesarxuchao)) + * Custom apiservers built with the latest apiserver library will have the 100MB limit on the body of resource requests as well. The limit can be altered via ServerRunOptions.MaxRequestBodyBytes. + * The body size limit does not apply to subresources like pods/proxy that proxy request content to another server. +* The apiserver, including both the kube-apiserver and apiservers built with the generic apiserver library, will now return 413 RequestEntityTooLarge error if a json patch contains more than 10,000 operations. ([#74000](https://github.com/kubernetes/kubernetes/pull/74000), [@caesarxuchao](https://github.com/caesarxuchao)) +* fix smb remount issue on Windows ([#73661](https://github.com/kubernetes/kubernetes/pull/73661), [@andyzhangx](https://github.com/andyzhangx)) +* Add `metrics-port` to kube-proxy cmd flags. ([#72682](https://github.com/kubernetes/kubernetes/pull/72682), [@whypro](https://github.com/whypro)) +* Adds deleting pods created by DaemonSet assigned to not existing nodes. 
([#73401](https://github.com/kubernetes/kubernetes/pull/73401), [@krzysztof-jastrzebski](https://github.com/krzysztof-jastrzebski)) +* Fix watch to not send the same set of events multiple times causing watcher to go back in time ([#73845](https://github.com/kubernetes/kubernetes/pull/73845), [@wojtek-t](https://github.com/wojtek-t)) +* MAC Address filter has been fixed in vSphere Cloud Provider, it no longer ignores `00:1c:14` and `00:05:69` prefixes ([#73721](https://github.com/kubernetes/kubernetes/pull/73721), [@frapposelli](https://github.com/frapposelli)) +* fixes an error processing watch events when running skewed apiservers ([#73482](https://github.com/kubernetes/kubernetes/pull/73482), [@liggitt](https://github.com/liggitt)) +* add goroutine to move unschedulable pods to activeq if they are not retried for more than 1 minute ([#72558](https://github.com/kubernetes/kubernetes/pull/72558), [@denkensk](https://github.com/denkensk)) +* scheduler: use incremental scheduling cycle in PriorityQueue to put all in-flight unschedulable pods back to active queue if we received move request ([#73309](https://github.com/kubernetes/kubernetes/pull/73309), [@cofyc](https://github.com/cofyc)) +* A new `TaintNodesByCondition` admission plugin taints newly created Node objects as "not ready", to fix a race condition that could cause pods to be scheduled on new nodes before their taints were updated to accurately reflect their reported conditions. This admission plugin is enabled by default if the `TaintNodesByCondition` feature is enabled. ([#73097](https://github.com/kubernetes/kubernetes/pull/73097), [@bsalamat](https://github.com/bsalamat)) +* Scale max-inflight limits together with master VM sizes. 
([#73268](https://github.com/kubernetes/kubernetes/pull/73268), [@wojtek-t](https://github.com/wojtek-t)) +* Update to go1.10.8 ([#73329](https://github.com/kubernetes/kubernetes/pull/73329), [@ixdy](https://github.com/ixdy)) +* Allow for watching objects larger than 1MB given etcd accepts objects of size up to 1.5MB ([#72053](https://github.com/kubernetes/kubernetes/pull/72053), [@wojtek-t](https://github.com/wojtek-t)) +* Improve efficiency of preemption logic in clusters with many pending pods. ([#72895](https://github.com/kubernetes/kubernetes/pull/72895), [@bsalamat](https://github.com/bsalamat)) + + + # v1.12.5 [Documentation](https://docs.k8s.io) @@ -582,7 +662,7 @@ Sig Azure was focused on two primary new alpha features: Besides the above new features, support for Azure Virtual Machine Scale Sets (VMSS) and Cluster-Autoscaler is now stable and considered GA: - Azure virtual machine scale sets (VMSS) allow you to create and manage identical load balanced VMs that automatically increase or decrease based on demand or a set schedule. -- With this new stable feature, Kubernetes supports the scaling of containerized applications with Azure VMSS, including the ability to integrate it with cluster-autoscaler to automatically adjust the size of the Kubernetes clusters based on the same conditions. +- With this new stable feature, Kubernetes supports the scaling of containerized applications with Azure VMSS, including the ability to integrate it with cluster-autoscaler to automatically adjust the size of the Kubernetes clusters based on the same conditions. ### SIG-cli @@ -593,11 +673,11 @@ SIG CLI focused on implementing the new plugin mechanism, providing a library wi This is the first Kubernetes release for this SIG! In v1.12, SIG Cloud Provider focused on building the processes and infrastructure to better support existing and new cloud providers. 
Some of these initiatives (many of which are still in progress) are: - Reporting E2E conformance test results to TestGrid from every cloud provider (in collaboration with SIG Testing & SIG Release) -- Defining minimum required documentation from each cloud provider which includes (in collaboration with SIG Docs): +- Defining minimum required documentation from each cloud provider which includes (in collaboration with SIG Docs): - example manifests for the kube-apiserver, kube-controller-manager, kube-schedule, kubelet, and the cloud-controller-manager - labels/annotations that are consumed by any cloud specific controllers -In addition to the above, SIG Cloud Provider has been focusing on a long running effort to remove cloud provider code from kubernetes/kubernetes. +In addition to the above, SIG Cloud Provider has been focusing on a long running effort to remove cloud provider code from kubernetes/kubernetes. ### SIG-cluster-lifecycle @@ -638,11 +718,11 @@ No feature work, but a large refactoring of metrics-server as well as a number o ### SIG-node -SIG-node graduated the PodShareProcessNamespace feature from alpha to beta. This feature allows a pod spec to request that all containers in a pod share a common process namespaces. +SIG-node graduated the PodShareProcessNamespace feature from alpha to beta. This feature allows a pod spec to request that all containers in a pod share a common process namespaces. -Two alpha features were also added in this release. +Two alpha features were also added in this release. -The RuntimeClass alpha feature enables a node to surface multiple runtime options to support a variety of workload types. Examples include native linux containers, and “sandboxed” containers that isolate the container from the host kernel. +The RuntimeClass alpha feature enables a node to surface multiple runtime options to support a variety of workload types. 
Examples include native linux containers, and “sandboxed” containers that isolate the container from the host kernel. The CustomCFSQuotaPeriod alpha feature enables node administrators to change the default period used to enforce CFS quota on a node. This can improve performance for some workloads that experience latency while using CFS quota with the default measurement period. Finally, the SIG continues to focus on improving reliability by fixing bugs while working out design and implementation of future features. @@ -732,7 +812,7 @@ SIG Windows focused on stability and reliability of our existing feature set. We - kubeadm: The `v1alpha1` config API has been removed. ([#65628](https://github.com/kubernetes/kubernetes/pull/65628), [@luxas](https://github.com/luxas)) Courtesy of SIG Cluster Lifecycle - kube-apiserver: When using `--enable-admission-plugins` the `Priority` admission plugin is now enabled by default (matching changes in 1.11.1+). If using `--admission-control` to fully specify the set of admission plugins, it is now necessary to add the `Priority` admission plugin for the PodPriority feature to work properly. ([#65739](https://github.com/kubernetes/kubernetes/pull/65739), [@liggitt](https://github.com/liggitt)) Courtesy of SIG Scheduling - The `system-node-critical` and `system-cluster-critical` priority classes are now limited to the `kube-system` namespace by the `PodPriority` admission plugin (matching changes in 1.11.1+). ([#65593](https://github.com/kubernetes/kubernetes/pull/65593), [@bsalamat](https://github.com/bsalamat)) Courtesy of SIG Scheduling -- kubeadm: Control plane images (etcd, kube-apiserver, kube-proxy, etc.) no longer use arch suffixes. Arch suffixes are kept for kube-dns only. ([#66960](https://github.com/kubernetes/kubernetes/pull/66960), +- kubeadm: Control plane images (etcd, kube-apiserver, kube-proxy, etc.) no longer use arch suffixes. Arch suffixes are kept for kube-dns only. 
([#66960](https://github.com/kubernetes/kubernetes/pull/66960), [@rosti](https://github.com/rosti)) Courtesy of SIG Cluster Lifecycle, SIG Release, and SIG Testing - kubeadm - Feature-gates HighAvailability, SelfHosting, CertsInSecrets are now deprecated and can no longer be used for new clusters. Cluster updates using above feature-gates flag is not supported. ([#67786](https://github.com/kubernetes/kubernetes/pull/67786), [@fabriziopandini](https://github.com/fabriziopandini)) Courtesy of SIG Cluster Lifecycle - 'KubeSchedulerConfiguration' which used to be under GroupVersion 'componentconfig/v1alpha1', @@ -798,7 +878,7 @@ is now under 'kubescheduler.config.k8s.io/v1alpha1'. ([#66916](https://github.c - kubeadm now supports the phase command "alpha phase kubelet config annotate-cri". ([#68449](https://github.com/kubernetes/kubernetes/pull/68449), [@fabriziopandini](https://github.com/fabriziopandini)) - kubeadm: --cri-socket now defaults to tcp://localhost:2375 when running on Windows. ([#67447](https://github.com/kubernetes/kubernetes/pull/67447), [@benmoss](https://github.com/benmoss)) - kubeadm now includes a new EXPERIMENTAL `--rootfs`, which (if specified) causes kubeadm to chroot before performing any file operations. This is expected to be useful when setting up kubernetes on a different filesystem, such as invoking kubeadm from docker. ([#54935](https://github.com/kubernetes/kubernetes/pull/54935), [@anguslees](https://github.com/anguslees)) -- The command line option --cri-socket-path of the kubeadm subcommand "kubeadm config images pull" has been renamed to --cri-socket to be consistent with the rest of kubeadm subcommands. +- The command line option --cri-socket-path of the kubeadm subcommand "kubeadm config images pull" has been renamed to --cri-socket to be consistent with the rest of kubeadm subcommands. 
- kubeadm: The ControlPlaneEndpoint was moved from the API config struct to ClusterConfiguration ([#67830](https://github.com/kubernetes/kubernetes/pull/67830), [@fabriziopandini](https://github.com/fabriziopandini)) - kubeadm: InitConfiguration now consists of two structs: InitConfiguration and ClusterConfiguration ([#67441](https://github.com/kubernetes/kubernetes/pull/67441), [@rosti](https://github.com/rosti)) - The RuntimeClass API has been added. This feature is in alpha, and the RuntimeClass feature gate must be enabled in order to use it. The RuntimeClass API resource defines different classes of runtimes that may be used to run containers in the cluster. Pods can select a RuntimeClass to use via the RuntimeClassName field. ([#67737](https://github.com/kubernetes/kubernetes/pull/67737), [@tallclair](https://github.com/tallclair)) @@ -845,7 +925,7 @@ is now under 'kubescheduler.config.k8s.io/v1alpha1'. ([#66916](https://github.c ### SIG Auth -- TokenRequest and TokenRequestProjection are now beta features. To enable these feature, the API server needs to be started with the `--service-account-issuer`, `--service-account-signing-key-file`, and `--service-account-api-audiences` flags. +- TokenRequest and TokenRequestProjection are now beta features. To enable these feature, the API server needs to be started with the `--service-account-issuer`, `--service-account-signing-key-file`, and `--service-account-api-audiences` flags. ([#67349](https://github.com/kubernetes/kubernetes/pull/67349), [@mikedanese](https://github.com/mikedanese)) - The admin RBAC role now aggregates edit and view. The edit RBAC role now aggregates view. ([#66684](https://github.com/kubernetes/kubernetes/pull/66684), [@deads2k](https://github.com/deads2k)) - UserInfo derived from service account tokens created from the TokenRequest API now include the pod name and UID in the Extra field. 
([#61858](https://github.com/kubernetes/kubernetes/pull/61858), [@mikedanese](https://github.com/mikedanese)) @@ -907,7 +987,7 @@ is now under 'kubescheduler.config.k8s.io/v1alpha1'. ([#66916](https://github.c - kubeadm uses audit policy v1 instead of v1beta1 ([#67176](https://github.com/kubernetes/kubernetes/pull/67176), [@charrywanganthony](https://github.com/charrywanganthony)) - Kubeadm nodes will no longer be able to run with an empty or invalid hostname in /proc/sys/kernel/hostname ([#64815](https://github.com/kubernetes/kubernetes/pull/64815), [@dixudx](https://github.com/dixudx)) - kubeadm now can join the cluster with pre-existing client certificate if provided ([#66482](https://github.com/kubernetes/kubernetes/pull/66482), [@dixudx](https://github.com/dixudx)) -([#66382](https://github.com/kubernetes/kubernetes/pull/66382), [@bart0sh](https://github.com/bart0sh)) +([#66382](https://github.com/kubernetes/kubernetes/pull/66382), [@bart0sh](https://github.com/bart0sh)) - kubeadm will no longer hang indefinitely if there is no Internet connection and --kubernetes-version is not specified.([#65676](https://github.com/kubernetes/kubernetes/pull/65676), [@dkoshkin](https://github.com/dkoshkin)) - kubeadm: kube-proxy will now run on all nodes, and not just master nodes.([#65931](https://github.com/kubernetes/kubernetes/pull/65931), [@neolit123](https://github.com/neolit123)) - kubeadm now uses separate YAML documents for the kubelet and kube-proxy ComponentConfigs. ([#65787](https://github.com/kubernetes/kubernetes/pull/65787), [@luxas](https://github.com/luxas)) @@ -1653,7 +1733,7 @@ filename | sha256 hash * The PodShareProcessNamespace feature to configure PID namespace sharing within a pod has been promoted to beta. ([#66507](https://github.com/kubernetes/kubernetes/pull/66507), [@verb](https://github.com/verb)) * `kubectl create {clusterrole,role}`'s `--resources` flag supports asterisk to specify all resources. 
([#62945](https://github.com/kubernetes/kubernetes/pull/62945), [@nak3](https://github.com/nak3)) * Bump up version number of debian-base, debian-hyperkube-base and debian-iptables. ([#67026](https://github.com/kubernetes/kubernetes/pull/67026), [@satyasm](https://github.com/satyasm)) - * Also updates dependencies of users of debian-base. + * Also updates dependencies of users of debian-base. * debian-base version 0.3.1 is already available. * DynamicProvisioningScheduling and VolumeScheduling is now supported for Azure managed disks. Feature gates DynamicProvisioningScheduling and VolumeScheduling should be enabled before using this feature. ([#67121](https://github.com/kubernetes/kubernetes/pull/67121), [@feiskyer](https://github.com/feiskyer)) * kube-apiserver now includes all registered API groups in discovery, including registered extension API group/versions for unavailable extension API servers. ([#66932](https://github.com/kubernetes/kubernetes/pull/66932), [@nilebox](https://github.com/nilebox)) From d04f274d81417d3073ed3dd78c22c5281d83000d Mon Sep 17 00:00:00 2001 From: Joe Betz Date: Tue, 16 Oct 2018 13:35:42 -0700 Subject: [PATCH 11/96] Reduce cardinality of admission webhook metrics --- .../pkg/admission/metrics/metrics.go | 15 ++++---- .../pkg/admission/metrics/metrics_test.go | 34 ++++++------------- 2 files changed, 17 insertions(+), 32 deletions(-) diff --git a/staging/src/k8s.io/apiserver/pkg/admission/metrics/metrics.go b/staging/src/k8s.io/apiserver/pkg/admission/metrics/metrics.go index 0955a98c9b88a..a5ab97a74d52b 100644 --- a/staging/src/k8s.io/apiserver/pkg/admission/metrics/metrics.go +++ b/staging/src/k8s.io/apiserver/pkg/admission/metrics/metrics.go @@ -112,17 +112,17 @@ func newAdmissionMetrics() *AdmissionMetrics { // Admission metrics for a step of the admission flow. The entire admission flow is broken down into a series of steps // Each step is identified by a distinct type label value. 
step := newMetricSet("step", - []string{"type", "operation", "group", "version", "resource", "subresource", "rejected"}, + []string{"type", "operation", "rejected"}, "Admission sub-step %s, broken out for each operation and API resource and step type (validate or admit).", true) // Built-in admission controller metrics. Each admission controller is identified by name. controller := newMetricSet("controller", - []string{"name", "type", "operation", "group", "version", "resource", "subresource", "rejected"}, + []string{"name", "type", "operation", "rejected"}, "Admission controller %s, identified by name and broken out for each operation and API resource and type (validate or admit).", false) // Admission webhook metrics. Each webhook is identified by name. webhook := newMetricSet("webhook", - []string{"name", "type", "operation", "group", "version", "resource", "subresource", "rejected"}, + []string{"name", "type", "operation", "rejected"}, "Admission webhook %s, identified by name and broken out for each operation and API resource and type (validate or admit).", false) step.mustRegister() @@ -139,20 +139,17 @@ func (m *AdmissionMetrics) reset() { // ObserveAdmissionStep records admission related metrics for a admission step, identified by step type. func (m *AdmissionMetrics) ObserveAdmissionStep(elapsed time.Duration, rejected bool, attr admission.Attributes, stepType string, extraLabels ...string) { - gvr := attr.GetResource() - m.step.observe(elapsed, append(extraLabels, stepType, string(attr.GetOperation()), gvr.Group, gvr.Version, gvr.Resource, attr.GetSubresource(), strconv.FormatBool(rejected))...) + m.step.observe(elapsed, append(extraLabels, stepType, string(attr.GetOperation()), strconv.FormatBool(rejected))...) } // ObserveAdmissionController records admission related metrics for a built-in admission controller, identified by it's plugin handler name. 
func (m *AdmissionMetrics) ObserveAdmissionController(elapsed time.Duration, rejected bool, attr admission.Attributes, stepType string, extraLabels ...string) { - gvr := attr.GetResource() - m.controller.observe(elapsed, append(extraLabels, stepType, string(attr.GetOperation()), gvr.Group, gvr.Version, gvr.Resource, attr.GetSubresource(), strconv.FormatBool(rejected))...) + m.controller.observe(elapsed, append(extraLabels, stepType, string(attr.GetOperation()), strconv.FormatBool(rejected))...) } // ObserveWebhook records admission related metrics for a admission webhook. func (m *AdmissionMetrics) ObserveWebhook(elapsed time.Duration, rejected bool, attr admission.Attributes, stepType string, extraLabels ...string) { - gvr := attr.GetResource() - m.webhook.observe(elapsed, append(extraLabels, stepType, string(attr.GetOperation()), gvr.Group, gvr.Version, gvr.Resource, attr.GetSubresource(), strconv.FormatBool(rejected))...) + m.webhook.observe(elapsed, append(extraLabels, stepType, string(attr.GetOperation()), strconv.FormatBool(rejected))...) 
} type metricSet struct { diff --git a/staging/src/k8s.io/apiserver/pkg/admission/metrics/metrics_test.go b/staging/src/k8s.io/apiserver/pkg/admission/metrics/metrics_test.go index d2b4bf75afea1..92c8314da03e1 100644 --- a/staging/src/k8s.io/apiserver/pkg/admission/metrics/metrics_test.go +++ b/staging/src/k8s.io/apiserver/pkg/admission/metrics/metrics_test.go @@ -37,13 +37,9 @@ func TestObserveAdmissionStep(t *testing.T) { handler.(admission.MutationInterface).Admit(attr) handler.(admission.ValidationInterface).Validate(attr) wantLabels := map[string]string{ - "operation": string(admission.Create), - "group": resource.Group, - "version": resource.Version, - "resource": resource.Resource, - "subresource": "subresource", - "type": "admit", - "rejected": "false", + "operation": string(admission.Create), + "type": "admit", + "rejected": "false", } expectHistogramCountTotal(t, "apiserver_admission_step_admission_latencies_seconds", wantLabels, 1) expectFindMetric(t, "apiserver_admission_step_admission_latencies_seconds_summary", wantLabels) @@ -59,14 +55,10 @@ func TestObserveAdmissionController(t *testing.T) { handler.(admission.MutationInterface).Admit(attr) handler.(admission.ValidationInterface).Validate(attr) wantLabels := map[string]string{ - "name": "a", - "operation": string(admission.Create), - "group": resource.Group, - "version": resource.Version, - "resource": resource.Resource, - "subresource": "subresource", - "type": "admit", - "rejected": "false", + "name": "a", + "operation": string(admission.Create), + "type": "admit", + "rejected": "false", } expectHistogramCountTotal(t, "apiserver_admission_controller_admission_latencies_seconds", wantLabels, 1) @@ -78,14 +70,10 @@ func TestObserveWebhook(t *testing.T) { Metrics.reset() Metrics.ObserveWebhook(2*time.Second, false, attr, stepAdmit, "x") wantLabels := map[string]string{ - "name": "x", - "operation": string(admission.Create), - "group": resource.Group, - "version": resource.Version, - "resource": 
resource.Resource, - "subresource": "subresource", - "type": "admit", - "rejected": "false", + "name": "x", + "operation": string(admission.Create), + "type": "admit", + "rejected": "false", } expectHistogramCountTotal(t, "apiserver_admission_webhook_admission_latencies_seconds", wantLabels, 1) } From 7d49489b80536559a522ab0c880dc5e61b87dd95 Mon Sep 17 00:00:00 2001 From: danielqsj Date: Fri, 22 Feb 2019 10:52:04 +0800 Subject: [PATCH 12/96] fix negative slice index error in keymutex --- pkg/util/keymutex/hashed.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/pkg/util/keymutex/hashed.go b/pkg/util/keymutex/hashed.go index 5fe9a025c2455..d97d50f659937 100644 --- a/pkg/util/keymutex/hashed.go +++ b/pkg/util/keymutex/hashed.go @@ -45,20 +45,20 @@ type hashedKeyMutex struct { // Acquires a lock associated with the specified ID. func (km *hashedKeyMutex) LockKey(id string) { glog.V(5).Infof("hashedKeyMutex.LockKey(...) called for id %q\r\n", id) - km.mutexes[km.hash(id)%len(km.mutexes)].Lock() + km.mutexes[km.hash(id)%uint32(len(km.mutexes))].Lock() glog.V(5).Infof("hashedKeyMutex.LockKey(...) for id %q completed.\r\n", id) } // Releases the lock associated with the specified ID. func (km *hashedKeyMutex) UnlockKey(id string) error { glog.V(5).Infof("hashedKeyMutex.UnlockKey(...) called for id %q\r\n", id) - km.mutexes[km.hash(id)%len(km.mutexes)].Unlock() + km.mutexes[km.hash(id)%uint32(len(km.mutexes))].Unlock() glog.V(5).Infof("hashedKeyMutex.UnlockKey(...) 
for id %q completed.\r\n", id) return nil } -func (km *hashedKeyMutex) hash(id string) int { +func (km *hashedKeyMutex) hash(id string) uint32 { h := fnv.New32a() h.Write([]byte(id)) - return int(h.Sum32()) + return h.Sum32() } From 6cd8e914d2db604f49caedc67d68abe8c1356c79 Mon Sep 17 00:00:00 2001 From: Han Kang Date: Tue, 26 Feb 2019 16:22:24 -0800 Subject: [PATCH 13/96] Remove reflector metrics as they currently cause a memory leak --- pkg/util/reflector/prometheus/prometheus.go | 2 -- .../k8s.io/client-go/tools/cache/reflector.go | 27 +------------------ 2 files changed, 1 insertion(+), 28 deletions(-) diff --git a/pkg/util/reflector/prometheus/prometheus.go b/pkg/util/reflector/prometheus/prometheus.go index 958a0007cddbc..63657e9c55de2 100644 --- a/pkg/util/reflector/prometheus/prometheus.go +++ b/pkg/util/reflector/prometheus/prometheus.go @@ -85,8 +85,6 @@ func init() { prometheus.MustRegister(watchDuration) prometheus.MustRegister(itemsPerWatch) prometheus.MustRegister(lastResourceVersion) - - cache.SetReflectorMetricsProvider(prometheusMetricsProvider{}) } type prometheusMetricsProvider struct{} diff --git a/staging/src/k8s.io/client-go/tools/cache/reflector.go b/staging/src/k8s.io/client-go/tools/cache/reflector.go index 9ee7efcbbd822..1236e9ce7a94d 100644 --- a/staging/src/k8s.io/client-go/tools/cache/reflector.go +++ b/staging/src/k8s.io/client-go/tools/cache/reflector.go @@ -24,10 +24,8 @@ import ( "net" "net/url" "reflect" - "strconv" "strings" "sync" - "sync/atomic" "syscall" "time" @@ -95,17 +93,10 @@ func NewReflector(lw ListerWatcher, expectedType interface{}, store Store, resyn return NewNamedReflector(naming.GetNameFromCallsite(internalPackages...), lw, expectedType, store, resyncPeriod) } -// reflectorDisambiguator is used to disambiguate started reflectors. -// initialized to an unstable value to ensure meaning isn't attributed to the suffix. 
-var reflectorDisambiguator = int64(time.Now().UnixNano() % 12345) - // NewNamedReflector same as NewReflector, but with a specified name for logging func NewNamedReflector(name string, lw ListerWatcher, expectedType interface{}, store Store, resyncPeriod time.Duration) *Reflector { - reflectorSuffix := atomic.AddInt64(&reflectorDisambiguator, 1) r := &Reflector{ - name: name, - // we need this to be unique per process (some names are still the same) but obvious who it belongs to - metrics: newReflectorMetrics(makeValidPrometheusMetricLabel(fmt.Sprintf("reflector_"+name+"_%d", reflectorSuffix))), + name: name, listerWatcher: lw, store: store, expectedType: reflect.TypeOf(expectedType), @@ -173,13 +164,10 @@ func (r *Reflector) ListAndWatch(stopCh <-chan struct{}) error { // to be served from cache and potentially be delayed relative to // etcd contents. Reflector framework will catch up via Watch() eventually. options := metav1.ListOptions{ResourceVersion: "0"} - r.metrics.numberOfLists.Inc() - start := r.clock.Now() list, err := r.listerWatcher.List(options) if err != nil { return fmt.Errorf("%s: Failed to list %v: %v", r.name, r.expectedType, err) } - r.metrics.listDuration.Observe(time.Since(start).Seconds()) listMetaInterface, err := meta.ListAccessor(list) if err != nil { return fmt.Errorf("%s: Unable to understand list result %#v: %v", r.name, list, err) @@ -189,7 +177,6 @@ func (r *Reflector) ListAndWatch(stopCh <-chan struct{}) error { if err != nil { return fmt.Errorf("%s: Unable to understand list result %#v (%v)", r.name, list, err) } - r.metrics.numberOfItemsInList.Observe(float64(len(items))) if err := r.syncWith(items, resourceVersion); err != nil { return fmt.Errorf("%s: Unable to sync list result: %v", r.name, err) } @@ -239,7 +226,6 @@ func (r *Reflector) ListAndWatch(stopCh <-chan struct{}) error { TimeoutSeconds: &timeoutSeconds, } - r.metrics.numberOfWatches.Inc() w, err := r.listerWatcher.Watch(options) if err != nil { switch err { @@ -291,11 
+277,6 @@ func (r *Reflector) watchHandler(w watch.Interface, resourceVersion *string, err // Stopping the watcher should be idempotent and if we return from this function there's no way // we're coming back in with the same watch interface. defer w.Stop() - // update metrics - defer func() { - r.metrics.numberOfItemsInWatch.Observe(float64(eventCount)) - r.metrics.watchDuration.Observe(time.Since(start).Seconds()) - }() loop: for { @@ -351,7 +332,6 @@ loop: watchDuration := r.clock.Now().Sub(start) if watchDuration < 1*time.Second && eventCount == 0 { - r.metrics.numberOfShortWatches.Inc() return fmt.Errorf("very short watch: %s: Unexpected watch close - watch lasted less than a second and no items received", r.name) } glog.V(4).Infof("%s: Watch close - %v total %v items received", r.name, r.expectedType, eventCount) @@ -370,9 +350,4 @@ func (r *Reflector) setLastSyncResourceVersion(v string) { r.lastSyncResourceVersionMutex.Lock() defer r.lastSyncResourceVersionMutex.Unlock() r.lastSyncResourceVersion = v - - rv, err := strconv.Atoi(v) - if err == nil { - r.metrics.lastResourceVersion.Set(float64(rv)) - } } From 59711d5381f6ec7aca4c2136f5bd296f8c4037ff Mon Sep 17 00:00:00 2001 From: Jordan Liggitt Date: Sat, 23 Feb 2019 00:19:47 -0500 Subject: [PATCH 14/96] Explicitly set GVK when sending objects to webhooks --- .../pkg/admission/plugin/webhook/generic/conversion.go | 2 ++ .../admission/plugin/webhook/generic/conversion_test.go | 8 ++++++++ 2 files changed, 10 insertions(+) diff --git a/staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/generic/conversion.go b/staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/generic/conversion.go index a75c63fa9feec..050c31730fe20 100644 --- a/staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/generic/conversion.go +++ b/staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/generic/conversion.go @@ -43,6 +43,8 @@ func (c *convertor) ConvertToGVK(obj runtime.Object, gvk schema.GroupVersionKind if err != 
nil { return nil, err } + // Explicitly set the GVK + out.GetObjectKind().SetGroupVersionKind(gvk) return out, nil } diff --git a/staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/generic/conversion_test.go b/staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/generic/conversion_test.go index 499853566fefd..153712e3019e7 100644 --- a/staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/generic/conversion_test.go +++ b/staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/generic/conversion_test.go @@ -61,6 +61,10 @@ func TestConvertToGVK(t *testing.T) { }, gvk: examplev1.SchemeGroupVersion.WithKind("Pod"), expectedObj: &examplev1.Pod{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "example.apiserver.k8s.io/v1", + Kind: "Pod", + }, ObjectMeta: metav1.ObjectMeta{ Name: "pod1", Labels: map[string]string{ @@ -86,6 +90,10 @@ func TestConvertToGVK(t *testing.T) { }, gvk: example2v1.SchemeGroupVersion.WithKind("ReplicaSet"), expectedObj: &example2v1.ReplicaSet{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "example2.apiserver.k8s.io/v1", + Kind: "ReplicaSet", + }, ObjectMeta: metav1.ObjectMeta{ Name: "rs1", Labels: map[string]string{ From d5fe49be10b7a84d4ccc2d61211e1a447f6febcd Mon Sep 17 00:00:00 2001 From: andyzhangx Date: Thu, 28 Feb 2019 08:52:35 +0000 Subject: [PATCH 15/96] add Azure Container Registry anonymous repo support apply fix for msi and fix test failure --- pkg/credentialprovider/azure/azure_credentials.go | 7 +++++++ pkg/credentialprovider/azure/azure_credentials_test.go | 8 ++++---- 2 files changed, 11 insertions(+), 4 deletions(-) diff --git a/pkg/credentialprovider/azure/azure_credentials.go b/pkg/credentialprovider/azure/azure_credentials.go index ffeab5ea5ab66..256dcc7f2e8cc 100644 --- a/pkg/credentialprovider/azure/azure_credentials.go +++ b/pkg/credentialprovider/azure/azure_credentials.go @@ -206,6 +206,13 @@ func (a *acrProvider) Provide() credentialprovider.DockerConfig { cfg[url] = *cred } } + + // add ACR anonymous repo 
support: use empty username and password for anonymous access + cfg["*.azurecr.*"] = credentialprovider.DockerConfigEntry{ + Username: "", + Password: "", + Email: dummyRegistryEmail, + } return cfg } diff --git a/pkg/credentialprovider/azure/azure_credentials_test.go b/pkg/credentialprovider/azure/azure_credentials_test.go index d0201f0a47748..6e5434ee285e9 100644 --- a/pkg/credentialprovider/azure/azure_credentials_test.go +++ b/pkg/credentialprovider/azure/azure_credentials_test.go @@ -76,14 +76,14 @@ func Test(t *testing.T) { creds := provider.Provide() - if len(creds) != len(result) { - t.Errorf("Unexpected list: %v, expected length %d", creds, len(result)) + if len(creds) != len(result)+1 { + t.Errorf("Unexpected list: %v, expected length %d", creds, len(result)+1) } for _, cred := range creds { - if cred.Username != "foo" { + if cred.Username != "" && cred.Username != "foo" { t.Errorf("expected 'foo' for username, saw: %v", cred.Username) } - if cred.Password != "bar" { + if cred.Password != "" && cred.Password != "bar" { t.Errorf("expected 'bar' for password, saw: %v", cred.Username) } } From 3687f555f60add91c34ac37c3e08688cb58c31b8 Mon Sep 17 00:00:00 2001 From: Alexander Brand Date: Tue, 22 Jan 2019 11:09:30 -0500 Subject: [PATCH 16/96] DaemonSet e2e: Update image and rolling upgrade test timeout Use Nginx as the DaemonSet image instead of the ServeHostname image. This was changed because the ServeHostname has a sleep after terminating which makes it incompatible with the DaemonSet Rolling Upgrade e2e test. In addition, make the DaemonSet Rolling Upgrade e2e test timeout a function of the number of nodes that make up the cluster. This is required because the more nodes there are, the longer the time it will take to complete a rolling upgrade. 
Signed-off-by: Alexander Brand --- test/e2e/apps/daemon_set.go | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/test/e2e/apps/daemon_set.go b/test/e2e/apps/daemon_set.go index acd6224313670..18bc991258070 100644 --- a/test/e2e/apps/daemon_set.go +++ b/test/e2e/apps/daemon_set.go @@ -93,7 +93,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() { f = framework.NewDefaultFramework("daemonsets") - image := framework.ServeHostnameImage + image := NginxImage dsName := "daemon-set" var ns string @@ -350,8 +350,15 @@ var _ = SIGDescribe("Daemon set [Serial]", func() { ds, err = c.AppsV1().DaemonSets(ns).Patch(dsName, types.StrategicMergePatchType, []byte(patch)) Expect(err).NotTo(HaveOccurred()) + // Time to complete the rolling upgrade is proportional to the number of nodes in the cluster. + // Get the number of nodes, and set the timeout appropriately. + nodes, err := c.CoreV1().Nodes().List(metav1.ListOptions{}) + Expect(err).NotTo(HaveOccurred()) + nodeCount := len(nodes.Items) + retryTimeout := dsRetryTimeout + time.Duration(nodeCount*30)*time.Second + By("Check that daemon pods images are updated.") - err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkDaemonPodsImageAndAvailability(c, ds, RedisImage, 1)) + err = wait.PollImmediate(dsRetryPeriod, retryTimeout, checkDaemonPodsImageAndAvailability(c, ds, RedisImage, 1)) Expect(err).NotTo(HaveOccurred()) By("Check that daemon pods are still running on every node of the cluster.") From e61eddd0b58d6ae4180af1809b3f433255b08580 Mon Sep 17 00:00:00 2001 From: Jordan Liggitt Date: Thu, 28 Feb 2019 17:33:57 -0500 Subject: [PATCH 17/96] Revert kubelet to default to ttl cache secret/configmap behavior --- .../app/util/config/testdata/conversion/master/internal.yaml | 2 +- .../app/util/config/testdata/conversion/master/v1alpha2.yaml | 2 +- .../app/util/config/testdata/conversion/master/v1alpha3.yaml | 2 +- .../app/util/config/testdata/defaulting/master/defaulted.yaml | 2 +- 
pkg/kubelet/apis/config/v1beta1/defaults.go | 2 +- 5 files changed, 5 insertions(+), 5 deletions(-) diff --git a/cmd/kubeadm/app/util/config/testdata/conversion/master/internal.yaml b/cmd/kubeadm/app/util/config/testdata/conversion/master/internal.yaml index 9ad61455ddc48..ababf0679f8aa 100644 --- a/cmd/kubeadm/app/util/config/testdata/conversion/master/internal.yaml +++ b/cmd/kubeadm/app/util/config/testdata/conversion/master/internal.yaml @@ -87,7 +87,7 @@ ComponentConfigs: ClusterDNS: - 10.96.0.10 ClusterDomain: cluster.local - ConfigMapAndSecretChangeDetectionStrategy: Watch + ConfigMapAndSecretChangeDetectionStrategy: Cache ContainerLogMaxFiles: 5 ContainerLogMaxSize: 10Mi ContentType: application/vnd.kubernetes.protobuf diff --git a/cmd/kubeadm/app/util/config/testdata/conversion/master/v1alpha2.yaml b/cmd/kubeadm/app/util/config/testdata/conversion/master/v1alpha2.yaml index b5ffea7a2cd4e..c47e2b6ccb4ac 100644 --- a/cmd/kubeadm/app/util/config/testdata/conversion/master/v1alpha2.yaml +++ b/cmd/kubeadm/app/util/config/testdata/conversion/master/v1alpha2.yaml @@ -86,7 +86,7 @@ kubeletConfiguration: clusterDNS: - 10.96.0.10 clusterDomain: cluster.local - configMapAndSecretChangeDetectionStrategy: Watch + configMapAndSecretChangeDetectionStrategy: Cache containerLogMaxFiles: 5 containerLogMaxSize: 10Mi contentType: application/vnd.kubernetes.protobuf diff --git a/cmd/kubeadm/app/util/config/testdata/conversion/master/v1alpha3.yaml b/cmd/kubeadm/app/util/config/testdata/conversion/master/v1alpha3.yaml index 4c9aadbd33657..20d5dc9ab616b 100644 --- a/cmd/kubeadm/app/util/config/testdata/conversion/master/v1alpha3.yaml +++ b/cmd/kubeadm/app/util/config/testdata/conversion/master/v1alpha3.yaml @@ -102,7 +102,7 @@ cgroupsPerQOS: true clusterDNS: - 10.96.0.10 clusterDomain: cluster.local -configMapAndSecretChangeDetectionStrategy: Watch +configMapAndSecretChangeDetectionStrategy: Cache containerLogMaxFiles: 5 containerLogMaxSize: 10Mi contentType: 
application/vnd.kubernetes.protobuf diff --git a/cmd/kubeadm/app/util/config/testdata/defaulting/master/defaulted.yaml b/cmd/kubeadm/app/util/config/testdata/defaulting/master/defaulted.yaml index 78242aa27645a..54b438a5d3842 100644 --- a/cmd/kubeadm/app/util/config/testdata/defaulting/master/defaulted.yaml +++ b/cmd/kubeadm/app/util/config/testdata/defaulting/master/defaulted.yaml @@ -97,7 +97,7 @@ cgroupsPerQOS: true clusterDNS: - 10.192.0.10 clusterDomain: cluster.global -configMapAndSecretChangeDetectionStrategy: Watch +configMapAndSecretChangeDetectionStrategy: Cache containerLogMaxFiles: 5 containerLogMaxSize: 10Mi contentType: application/vnd.kubernetes.protobuf diff --git a/pkg/kubelet/apis/config/v1beta1/defaults.go b/pkg/kubelet/apis/config/v1beta1/defaults.go index d5a12f338b093..2b0131186e6d7 100644 --- a/pkg/kubelet/apis/config/v1beta1/defaults.go +++ b/pkg/kubelet/apis/config/v1beta1/defaults.go @@ -204,7 +204,7 @@ func SetDefaults_KubeletConfiguration(obj *kubeletconfigv1beta1.KubeletConfigura obj.ContainerLogMaxFiles = utilpointer.Int32Ptr(5) } if obj.ConfigMapAndSecretChangeDetectionStrategy == "" { - obj.ConfigMapAndSecretChangeDetectionStrategy = kubeletconfigv1beta1.WatchChangeDetectionStrategy + obj.ConfigMapAndSecretChangeDetectionStrategy = kubeletconfigv1beta1.TTLCacheChangeDetectionStrategy } if obj.EnforceNodeAllocatable == nil { obj.EnforceNodeAllocatable = DefaultNodeAllocatableEnforcement From ca57f4b1677db2f6b72fdd4f6ea7fc47e00cdd1b Mon Sep 17 00:00:00 2001 From: Lu Fengqi Date: Tue, 26 Feb 2019 17:52:03 +0800 Subject: [PATCH 18/96] cri_stats_provider: overload nil as 0 for exited containers stats Always report 0 cpu/memory usage for exited containers to make metrics-server work as expect. 
Signed-off-by: Lu Fengqi --- pkg/kubelet/stats/cri_stats_provider.go | 6 ++++ pkg/kubelet/stats/cri_stats_provider_test.go | 29 +++++++++++++++++--- pkg/kubelet/stats/helper.go | 4 +++ 3 files changed, 35 insertions(+), 4 deletions(-) diff --git a/pkg/kubelet/stats/cri_stats_provider.go b/pkg/kubelet/stats/cri_stats_provider.go index 6580f46ead009..e67192cb5b12e 100644 --- a/pkg/kubelet/stats/cri_stats_provider.go +++ b/pkg/kubelet/stats/cri_stats_provider.go @@ -354,12 +354,18 @@ func (p *criStatsProvider) makeContainerStats( if stats.Cpu.UsageCoreNanoSeconds != nil { result.CPU.UsageCoreNanoSeconds = &stats.Cpu.UsageCoreNanoSeconds.Value } + } else { + result.CPU.Time = metav1.NewTime(time.Unix(0, time.Now().UnixNano())) + result.CPU.UsageCoreNanoSeconds = Uint64Ptr(0) } if stats.Memory != nil { result.Memory.Time = metav1.NewTime(time.Unix(0, stats.Memory.Timestamp)) if stats.Memory.WorkingSetBytes != nil { result.Memory.WorkingSetBytes = &stats.Memory.WorkingSetBytes.Value } + } else { + result.Memory.Time = metav1.NewTime(time.Unix(0, time.Now().UnixNano())) + result.Memory.WorkingSetBytes = Uint64Ptr(0) } if stats.WritableLayer != nil { result.Rootfs.Time = metav1.NewTime(time.Unix(0, stats.WritableLayer.Timestamp)) diff --git a/pkg/kubelet/stats/cri_stats_provider_test.go b/pkg/kubelet/stats/cri_stats_provider_test.go index 9e1dd1bb8c33d..4646f535d8840 100644 --- a/pkg/kubelet/stats/cri_stats_provider_test.go +++ b/pkg/kubelet/stats/cri_stats_provider_test.go @@ -58,12 +58,14 @@ func TestCRIListPodStats(t *testing.T) { seedContainer2 = 5000 seedSandbox2 = 6000 seedContainer3 = 7000 + seedContainer5 = 9000 ) const ( pName0 = "pod0" pName1 = "pod1" pName2 = "pod2" + pName3 = "pod3" ) const ( @@ -71,6 +73,7 @@ func TestCRIListPodStats(t *testing.T) { cName1 = "container1-name" cName2 = "container2-name" cName3 = "container3-name" + cName5 = "container5-name" ) var ( @@ -101,6 +104,11 @@ func TestCRIListPodStats(t *testing.T) { container4 = 
makeFakeContainer(sandbox2, cName3, 1, false) containerStats4 = makeFakeContainerStats(container4, imageFsMountpoint) containerLogStats4 = makeFakeLogStats(4000) + + sandbox3 = makeFakePodSandbox("sandbox3-name", "sandbox3-uid", "sandbox3-ns") + container5 = makeFakeContainer(sandbox3, cName5, 0, true) + containerStats5 = makeFakeContainerStats(container5, imageFsMountpoint) + containerLogStats5 = makeFakeLogStats(5000) ) var ( @@ -140,13 +148,13 @@ func TestCRIListPodStats(t *testing.T) { On("GetDirFsInfo", imageFsMountpoint).Return(imageFsInfo, nil). On("GetDirFsInfo", unknownMountpoint).Return(cadvisorapiv2.FsInfo{}, cadvisorfs.ErrNoSuchDevice) fakeRuntimeService.SetFakeSandboxes([]*critest.FakePodSandbox{ - sandbox0, sandbox1, sandbox2, + sandbox0, sandbox1, sandbox2, sandbox3, }) fakeRuntimeService.SetFakeContainers([]*critest.FakeContainer{ - container0, container1, container2, container3, container4, + container0, container1, container2, container3, container4, container5, }) fakeRuntimeService.SetFakeContainerStats([]*runtimeapi.ContainerStats{ - containerStats0, containerStats1, containerStats2, containerStats3, containerStats4, + containerStats0, containerStats1, containerStats2, containerStats3, containerStats4, containerStats5, }) ephemeralVolumes := makeFakeVolumeStats([]string{"ephVolume1, ephVolumes2"}) @@ -161,6 +169,7 @@ func TestCRIListPodStats(t *testing.T) { kuberuntime.BuildContainerLogsDirectory(types.UID("sandbox0-uid"), cName1): containerLogStats1, kuberuntime.BuildContainerLogsDirectory(types.UID("sandbox1-uid"), cName2): containerLogStats2, kuberuntime.BuildContainerLogsDirectory(types.UID("sandbox2-uid"), cName3): containerLogStats4, + kuberuntime.BuildContainerLogsDirectory(types.UID("sandbox3-uid"), cName5): containerLogStats5, } fakeLogStatsProvider := NewFakeLogMetricsService(fakeLogStats) @@ -177,7 +186,7 @@ func TestCRIListPodStats(t *testing.T) { stats, err := provider.ListPodStats() assert := assert.New(t) assert.NoError(err) - 
assert.Equal(3, len(stats)) + assert.Equal(4, len(stats)) podStatsMap := make(map[statsapi.PodReference]statsapi.PodStats) for _, s := range stats { @@ -239,6 +248,18 @@ func TestCRIListPodStats(t *testing.T) { checkCRINetworkStats(assert, p2.Network, infos[sandbox2.PodSandboxStatus.Id].Stats[0].Network) checkCRIPodCPUAndMemoryStats(assert, p2, infos[sandbox2Cgroup].Stats[0]) + p3 := podStatsMap[statsapi.PodReference{Name: "sandbox3-name", UID: "sandbox3-uid", Namespace: "sandbox3-ns"}] + assert.Equal(sandbox3.CreatedAt, p3.StartTime.UnixNano()) + assert.Equal(1, len(p3.Containers)) + + c5 := p3.Containers[0] + assert.Equal(cName5, c5.Name) + assert.Equal(container5.CreatedAt, c5.StartTime.UnixNano()) + assert.NotNil(c5.CPU.Time) + assert.Zero(*c5.CPU.UsageCoreNanoSeconds) + assert.NotNil(c5.Memory.Time) + assert.Zero(*c5.Memory.WorkingSetBytes) + mockCadvisor.AssertExpectations(t) } diff --git a/pkg/kubelet/stats/helper.go b/pkg/kubelet/stats/helper.go index a195b9c634a2d..6bb7a6f3db391 100644 --- a/pkg/kubelet/stats/helper.go +++ b/pkg/kubelet/stats/helper.go @@ -311,3 +311,7 @@ func getUint64Value(value *uint64) uint64 { return *value } + +func Uint64Ptr(i uint64) *uint64 { + return &i +} From ee1672ae15c0498f3c764aebc07e2301c873071b Mon Sep 17 00:00:00 2001 From: Timo Derstappen Date: Sat, 3 Nov 2018 11:36:14 +0100 Subject: [PATCH 19/96] flush iptable chains first and then remove them while cleaning up ipvs mode. flushing iptable chains first and then remove the chains. this avoids trying to remove chains that are still referenced by rules in other chains. 
fixes #70615 --- pkg/proxy/ipvs/proxier.go | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/pkg/proxy/ipvs/proxier.go b/pkg/proxy/ipvs/proxier.go index 7061e9ea1f894..555151e73bb31 100644 --- a/pkg/proxy/ipvs/proxier.go +++ b/pkg/proxy/ipvs/proxier.go @@ -543,7 +543,7 @@ func cleanupIptablesLeftovers(ipt utiliptables.Interface) (encounteredError bool } } - // Flush and remove all of our chains. + // Flush and remove all of our chains. Flushing all chains before removing them also removes all links between chains first. for _, ch := range iptablesChains { if err := ipt.FlushChain(ch.table, ch.chain); err != nil { if !utiliptables.IsNotFoundError(err) { @@ -551,6 +551,10 @@ func cleanupIptablesLeftovers(ipt utiliptables.Interface) (encounteredError bool encounteredError = true } } + } + + // Remove all of our chains. + for _, ch := range iptablesChains { if err := ipt.DeleteChain(ch.table, ch.chain); err != nil { if !utiliptables.IsNotFoundError(err) { glog.Errorf("Error removing iptables rules in ipvs proxier: %v", err) From 2cb2e3edf2433ddd4a645c886498d012923190d0 Mon Sep 17 00:00:00 2001 From: Jiaying Zhang Date: Thu, 7 Feb 2019 11:12:36 -0800 Subject: [PATCH 20/96] Checks whether we have cached runtime state before starting a container that requests any device plugin resource. If not, re-issue Allocate grpc calls. This allows us to handle the edge case that a pod got assigned to a node even before it populates its extended resource capacity. 
--- pkg/kubelet/cm/devicemanager/manager.go | 28 +++++++++++++++++++++---- pkg/kubelet/kubelet_node_status.go | 1 + 2 files changed, 25 insertions(+), 4 deletions(-) diff --git a/pkg/kubelet/cm/devicemanager/manager.go b/pkg/kubelet/cm/devicemanager/manager.go index 8064b572b39a2..30aff1016dd61 100644 --- a/pkg/kubelet/cm/devicemanager/manager.go +++ b/pkg/kubelet/cm/devicemanager/manager.go @@ -310,10 +310,7 @@ func (m *ManagerImpl) isVersionCompatibleWithPlugin(versions []string) bool { return false } -// Allocate is the call that you can use to allocate a set of devices -// from the registered device plugins. -func (m *ManagerImpl) Allocate(node *schedulercache.NodeInfo, attrs *lifecycle.PodAdmitAttributes) error { - pod := attrs.Pod +func (m *ManagerImpl) allocatePodResources(pod *v1.Pod) error { devicesToReuse := make(map[string]sets.String) for _, container := range pod.Spec.InitContainers { if err := m.allocateContainerResources(pod, &container, devicesToReuse); err != nil { @@ -327,6 +324,18 @@ func (m *ManagerImpl) Allocate(node *schedulercache.NodeInfo, attrs *lifecycle.P } m.podDevices.removeContainerAllocatedResources(string(pod.UID), container.Name, devicesToReuse) } + return nil +} + +// Allocate is the call that you can use to allocate a set of devices +// from the registered device plugins. 
+func (m *ManagerImpl) Allocate(node *schedulercache.NodeInfo, attrs *lifecycle.PodAdmitAttributes) error { + pod := attrs.Pod + err := m.allocatePodResources(pod) + if err != nil { + glog.Errorf("Failed to allocate device plugin resource for pod %s: %v", string(pod.UID), err) + return err + } m.mutex.Lock() defer m.mutex.Unlock() @@ -716,6 +725,7 @@ func (m *ManagerImpl) allocateContainerResources(pod *v1.Pod, container *v1.Cont func (m *ManagerImpl) GetDeviceRunContainerOptions(pod *v1.Pod, container *v1.Container) (*DeviceRunContainerOptions, error) { podUID := string(pod.UID) contName := container.Name + needsReAllocate := false for k := range container.Resources.Limits { resource := string(k) if !m.isDevicePluginResource(resource) { @@ -725,6 +735,16 @@ func (m *ManagerImpl) GetDeviceRunContainerOptions(pod *v1.Pod, container *v1.Co if err != nil { return nil, err } + // This is a device plugin resource yet we don't have cached + // resource state. This is likely due to a race during node + // restart. We re-issue allocate request to cover this race. 
+ if m.podDevices.containerDevices(podUID, contName, resource) == nil { + needsReAllocate = true + } + } + if needsReAllocate { + glog.V(2).Infof("needs re-allocate device plugin resources for pod %s", podUID) + m.allocatePodResources(pod) } m.mutex.Lock() defer m.mutex.Unlock() diff --git a/pkg/kubelet/kubelet_node_status.go b/pkg/kubelet/kubelet_node_status.go index 54f54f149aee5..830aec25cea44 100644 --- a/pkg/kubelet/kubelet_node_status.go +++ b/pkg/kubelet/kubelet_node_status.go @@ -132,6 +132,7 @@ func (kl *Kubelet) reconcileExtendedResource(initialNode, node *v1.Node) bool { requiresUpdate := false for k := range node.Status.Capacity { if v1helper.IsExtendedResourceName(k) { + glog.Infof("Zero out resource %s capacity in existing node.", k) node.Status.Capacity[k] = *resource.NewQuantity(int64(0), resource.DecimalSI) node.Status.Allocatable[k] = *resource.NewQuantity(int64(0), resource.DecimalSI) requiresUpdate = true From 38a3162748adb2ca733fd4de9558fc77f60cfa8e Mon Sep 17 00:00:00 2001 From: Maciej Szulik Date: Tue, 5 Mar 2019 14:55:01 +0100 Subject: [PATCH 21/96] Fix panic in kubectl cp command --- pkg/kubectl/cmd/cp.go | 34 ++++++++++-- pkg/kubectl/cmd/cp_test.go | 104 +++++++++++++++++++++++++++++++------ 2 files changed, 119 insertions(+), 19 deletions(-) diff --git a/pkg/kubectl/cmd/cp.go b/pkg/kubectl/cmd/cp.go index e7d73d081852d..768c6b71d679d 100644 --- a/pkg/kubectl/cmd/cp.go +++ b/pkg/kubectl/cmd/cp.go @@ -296,12 +296,24 @@ func (o *CopyOptions) copyFromPod(src, dest fileSpec) error { // remove extraneous path shortcuts - these could occur if a path contained extra "../" // and attempted to navigate beyond "/" in a remote filesystem prefix = stripPathShortcuts(prefix) - return untarAll(reader, dest.File, prefix) + return o.untarAll(reader, dest.File, prefix) } // stripPathShortcuts removes any leading or trailing "../" from a given path func stripPathShortcuts(p string) string { newPath := path.Clean(p) + trimmed := strings.TrimPrefix(newPath, 
"../") + + for trimmed != newPath { + newPath = trimmed + trimmed = strings.TrimPrefix(newPath, "../") + } + + // trim leftover {".", ".."} + if newPath == "." || newPath == ".." { + newPath = "" + } + if len(newPath) > 0 && string(newPath[0]) == "/" { return newPath[1:] } @@ -389,7 +401,7 @@ func clean(fileName string) string { return path.Clean(string(os.PathSeparator) + fileName) } -func untarAll(reader io.Reader, destFile, prefix string) error { +func (o *CopyOptions) untarAll(reader io.Reader, destFile, prefix string) error { entrySeq := -1 // TODO: use compression here? @@ -404,6 +416,12 @@ func untarAll(reader io.Reader, destFile, prefix string) error { } entrySeq++ mode := header.FileInfo().Mode() + // all the files will start with the prefix, which is the directory where + // they were located on the pod, we need to strip down that prefix, but + // if the prefix is missing it means the tar was tempered with + if !strings.HasPrefix(header.Name, prefix) { + return fmt.Errorf("tar contents corrupted") + } outFileName := path.Join(destFile, clean(header.Name[len(prefix):])) baseName := path.Dir(outFileName) if err := os.MkdirAll(baseName, 0755); err != nil { @@ -428,8 +446,16 @@ func untarAll(reader io.Reader, destFile, prefix string) error { } if mode&os.ModeSymlink != 0 { - err := os.Symlink(header.Linkname, outFileName) - if err != nil { + linkname := header.Linkname + // error is returned if linkname can't be made relative to destFile, + // but relative can end up being ../dir that's why we also need to + // verify if relative path is the same after Clean-ing + relative, err := filepath.Rel(destFile, linkname) + if path.IsAbs(linkname) && (err != nil || relative != stripPathShortcuts(relative)) { + fmt.Fprintf(o.IOStreams.ErrOut, "warning: link %q is pointing to %q which is outside target destination, skipping\n", outFileName, header.Linkname) + continue + } + if err := os.Symlink(linkname, outFileName); err != nil { return err } } else { diff --git 
a/pkg/kubectl/cmd/cp_test.go b/pkg/kubectl/cmd/cp_test.go index 162f3dc3d1505..7be8cd0344dbe 100644 --- a/pkg/kubectl/cmd/cp_test.go +++ b/pkg/kubectl/cmd/cp_test.go @@ -127,26 +127,32 @@ func TestGetPrefix(t *testing.T) { } } -func TestTarUntar(t *testing.T) { - dir, err := ioutil.TempDir("", "input") - dir2, err2 := ioutil.TempDir("", "output") - if err != nil || err2 != nil { - t.Errorf("unexpected error: %v | %v", err, err2) +func checkErr(t *testing.T, err error) { + if err != nil { + t.Errorf("unexpected error: %v", err) t.FailNow() } +} + +func TestTarUntar(t *testing.T) { + dir, err := ioutil.TempDir("", "input") + checkErr(t, err) + dir2, err := ioutil.TempDir("", "output") + checkErr(t, err) + dir3, err := ioutil.TempDir("", "dir") + checkErr(t, err) + dir = dir + "/" defer func() { - if err := os.RemoveAll(dir); err != nil { - t.Errorf("Unexpected error cleaning up: %v", err) - } - if err := os.RemoveAll(dir2); err != nil { - t.Errorf("Unexpected error cleaning up: %v", err) - } + os.RemoveAll(dir) + os.RemoveAll(dir2) + os.RemoveAll(dir3) }() files := []struct { name string data string + omitted bool fileType FileType }{ { @@ -171,7 +177,24 @@ func TestTarUntar(t *testing.T) { }, { name: "gakki", + data: "tmp/gakki", + fileType: SymLink, + }, + { + name: "relative_to_dest", + data: path.Join(dir2, "foo"), + fileType: SymLink, + }, + { + name: "tricky_relative", + data: path.Join(dir3, "xyz"), + omitted: true, + fileType: SymLink, + }, + { + name: "absolute_path", data: "/tmp/gakki", + omitted: true, fileType: SymLink, }, } @@ -204,13 +227,15 @@ func TestTarUntar(t *testing.T) { } + opts := NewCopyOptions(genericclioptions.NewTestIOStreamsDiscard()) + writer := &bytes.Buffer{} if err := makeTar(dir, dir, writer); err != nil { t.Fatalf("unexpected error: %v", err) } reader := bytes.NewBuffer(writer.Bytes()) - if err := untarAll(reader, dir2, ""); err != nil { + if err := opts.untarAll(reader, dir2, ""); err != nil { t.Fatalf("unexpected error: %v", err) } 
@@ -237,7 +262,12 @@ func TestTarUntar(t *testing.T) { } } else if file.fileType == SymLink { dest, err := os.Readlink(filePath) - + if file.omitted { + if err != nil && strings.Contains(err.Error(), "no such file or directory") { + continue + } + t.Fatalf("expected to omit symlink for %s", filePath) + } if err != nil { t.Fatalf("unexpected error: %v", err) } @@ -251,6 +281,48 @@ func TestTarUntar(t *testing.T) { } } +func TestTarUntarWrongPrefix(t *testing.T) { + dir, err := ioutil.TempDir("", "input") + checkErr(t, err) + dir2, err := ioutil.TempDir("", "output") + checkErr(t, err) + + dir = dir + "/" + defer func() { + os.RemoveAll(dir) + os.RemoveAll(dir2) + }() + + filepath := path.Join(dir, "foo") + if err := os.MkdirAll(path.Dir(filepath), 0755); err != nil { + t.Fatalf("unexpected error: %v", err) + } + f, err := os.Create(filepath) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + defer f.Close() + if _, err := io.Copy(f, bytes.NewBuffer([]byte("sample data"))); err != nil { + t.Fatalf("unexpected error: %v", err) + } + if err := f.Close(); err != nil { + t.Fatal(err) + } + + opts := NewCopyOptions(genericclioptions.NewTestIOStreamsDiscard()) + + writer := &bytes.Buffer{} + if err := makeTar(dir, dir, writer); err != nil { + t.Fatalf("unexpected error: %v", err) + } + + reader := bytes.NewBuffer(writer.Bytes()) + err = opts.untarAll(reader, dir2, "verylongprefix-showing-the-tar-was-tempered-with") + if err == nil || !strings.Contains(err.Error(), "tar contents corrupted") { + t.Fatalf("unexpected error: %v", err) + } +} + // TestCopyToLocalFileOrDir tests untarAll in two cases : // 1: copy pod file to local file // 2: copy pod file into local directory @@ -333,7 +405,8 @@ func TestCopyToLocalFileOrDir(t *testing.T) { } defer srcTarFile.Close() - if err := untarAll(srcTarFile, destPath, getPrefix(srcFilePath)); err != nil { + opts := NewCopyOptions(genericclioptions.NewTestIOStreamsDiscard()) + if err := opts.untarAll(srcTarFile, destPath, 
getPrefix(srcFilePath)); err != nil { t.Errorf("unexpected error: %v", err) t.FailNow() } @@ -480,7 +553,8 @@ func TestBadTar(t *testing.T) { t.FailNow() } - if err := untarAll(&buf, dir, "/prefix"); err != nil { + opts := NewCopyOptions(genericclioptions.NewTestIOStreamsDiscard()) + if err := opts.untarAll(&buf, dir, "/prefix"); err != nil { t.Errorf("unexpected error: %v ", err) t.FailNow() } From eb865d82cb13c3a1571680f9b763d40761410791 Mon Sep 17 00:00:00 2001 From: Cheng Xing Date: Fri, 1 Mar 2019 11:36:59 -0800 Subject: [PATCH 22/96] Augmenting API call retry in nodeinfomanager --- pkg/volume/csi/nodeinfomanager/BUILD | 5 +- .../csi/nodeinfomanager/nodeinfomanager.go | 210 +++++++++++------- 2 files changed, 129 insertions(+), 86 deletions(-) diff --git a/pkg/volume/csi/nodeinfomanager/BUILD b/pkg/volume/csi/nodeinfomanager/BUILD index 3eadf06129b2f..b4b5c48934bf8 100644 --- a/pkg/volume/csi/nodeinfomanager/BUILD +++ b/pkg/volume/csi/nodeinfomanager/BUILD @@ -7,7 +7,6 @@ go_library( visibility = ["//visibility:public"], deps = [ "//pkg/features:go_default_library", - "//pkg/util/node:go_default_library", "//pkg/volume:go_default_library", "//pkg/volume/util:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", @@ -15,10 +14,12 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/util/errors:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library", "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library", - "//staging/src/k8s.io/client-go/util/retry:go_default_library", "//staging/src/k8s.io/csi-api/pkg/apis/csi/v1alpha1:go_default_library", + 
"//staging/src/k8s.io/csi-api/pkg/client/clientset/versioned:go_default_library", "//vendor/github.com/container-storage-interface/spec/lib/go/csi/v0:go_default_library", "//vendor/github.com/golang/glog:go_default_library", ], diff --git a/pkg/volume/csi/nodeinfomanager/nodeinfomanager.go b/pkg/volume/csi/nodeinfomanager/nodeinfomanager.go index 0a28d9c4ba946..33b09f62c029c 100644 --- a/pkg/volume/csi/nodeinfomanager/nodeinfomanager.go +++ b/pkg/volume/csi/nodeinfomanager/nodeinfomanager.go @@ -21,6 +21,8 @@ package nodeinfomanager // import "k8s.io/kubernetes/pkg/volume/csi/nodeinfomana import ( "encoding/json" "fmt" + "k8s.io/apimachinery/pkg/util/wait" + "time" csipb "github.com/container-storage-interface/spec/lib/go/csi/v0" "github.com/golang/glog" @@ -29,12 +31,12 @@ import ( "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" + utilerrors "k8s.io/apimachinery/pkg/util/errors" "k8s.io/apimachinery/pkg/util/sets" utilfeature "k8s.io/apiserver/pkg/util/feature" - "k8s.io/client-go/util/retry" csiv1alpha1 "k8s.io/csi-api/pkg/apis/csi/v1alpha1" + csiclientset "k8s.io/csi-api/pkg/client/clientset/versioned" "k8s.io/kubernetes/pkg/features" - nodeutil "k8s.io/kubernetes/pkg/util/node" "k8s.io/kubernetes/pkg/volume" "k8s.io/kubernetes/pkg/volume/util" ) @@ -44,7 +46,15 @@ const ( annotationKeyNodeID = "csi.volume.kubernetes.io/nodeid" ) -var nodeKind = v1.SchemeGroupVersion.WithKind("Node") +var ( + nodeKind = v1.SchemeGroupVersion.WithKind("Node") + updateBackoff = wait.Backoff{ + Steps: 4, + Duration: 10 * time.Millisecond, + Factor: 5.0, + Jitter: 0.1, + } +) // nodeInfoManager contains necessary common dependencies to update node info on both // the Node and CSINodeInfo objects. 
@@ -134,51 +144,58 @@ func (nim *nodeInfoManager) RemoveNodeInfo(driverName string) error { return nil } -// updateNode repeatedly attempts to update the corresponding node object +func (nim *nodeInfoManager) updateNode(updateFuncs ...nodeUpdateFunc) error { + var updateErrs []error + err := wait.ExponentialBackoff(updateBackoff, func() (bool, error) { + if err := nim.tryUpdateNode(updateFuncs...); err != nil { + updateErrs = append(updateErrs, err) + return false, nil + } + return true, nil + }) + if err != nil { + return fmt.Errorf("error updating node: %v; caused by: %v", err, utilerrors.NewAggregate(updateErrs)) + } + return nil +} + +// tryUpdateNode repeatedly attempts to update the corresponding node object // which is modified by applying the given update functions sequentially. // Because updateFuncs are applied sequentially, later updateFuncs should take into account // the effects of previous updateFuncs to avoid potential conflicts. For example, if multiple // functions update the same field, updates in the last function are persisted. -func (nim *nodeInfoManager) updateNode(updateFuncs ...nodeUpdateFunc) error { - retryErr := retry.RetryOnConflict(retry.DefaultRetry, func() error { - // Retrieve the latest version of Node before attempting update, so that - // existing changes are not overwritten. RetryOnConflict uses - // exponential backoff to avoid exhausting the apiserver. - - kubeClient := nim.volumeHost.GetKubeClient() - if kubeClient == nil { - return fmt.Errorf("error getting kube client") - } +func (nim *nodeInfoManager) tryUpdateNode(updateFuncs ...nodeUpdateFunc) error { + // Retrieve the latest version of Node before attempting update, so that + // existing changes are not overwritten. RetryOnConflict uses + // exponential backoff to avoid exhausting the apiserver. 
- nodeClient := kubeClient.CoreV1().Nodes() - originalNode, err := nodeClient.Get(string(nim.nodeName), metav1.GetOptions{}) - node := originalNode.DeepCopy() - if err != nil { - return err // do not wrap error - } + kubeClient := nim.volumeHost.GetKubeClient() + if kubeClient == nil { + return fmt.Errorf("error getting kube client") + } - needUpdate := false - for _, update := range updateFuncs { - newNode, updated, err := update(node) - if err != nil { - return err - } - node = newNode - needUpdate = needUpdate || updated - } + nodeClient := kubeClient.CoreV1().Nodes() + originalNode, err := nodeClient.Get(string(nim.nodeName), metav1.GetOptions{}) + node := originalNode.DeepCopy() + if err != nil { + return err // do not wrap error + } - if needUpdate { - // PatchNodeStatus can update both node's status and labels or annotations - // Updating status by directly updating node does not work - _, _, updateErr := nodeutil.PatchNodeStatus(kubeClient.CoreV1(), types.NodeName(node.Name), originalNode, node) - return updateErr // do not wrap error + needUpdate := false + for _, update := range updateFuncs { + newNode, updated, err := update(node) + if err != nil { + return err } + node = newNode + needUpdate = needUpdate || updated + } - return nil - }) - if retryErr != nil { - return fmt.Errorf("node update failed: %v", retryErr) + if needUpdate { + _, updateErr := nodeClient.Update(node) + return updateErr // do not wrap error } + return nil } @@ -330,23 +347,37 @@ func (nim *nodeInfoManager) updateCSINodeInfo( return fmt.Errorf("error getting CSI client") } - retryErr := retry.RetryOnConflict(retry.DefaultRetry, func() error { - nodeInfo, err := csiKubeClient.CsiV1alpha1().CSINodeInfos().Get(string(nim.nodeName), metav1.GetOptions{}) - if nodeInfo == nil || errors.IsNotFound(err) { - return nim.createNodeInfoObject(driverName, driverNodeID, topology) - } - if err != nil { - return err // do not wrap error + var updateErrs []error + err := 
wait.ExponentialBackoff(updateBackoff, func() (bool, error) { + if err := nim.tryUpdateCSINodeInfo(csiKubeClient, driverName, driverNodeID, topology); err != nil { + updateErrs = append(updateErrs, err) + return false, nil } - - return nim.updateNodeInfoObject(nodeInfo, driverName, driverNodeID, topology) + return true, nil }) - if retryErr != nil { - return fmt.Errorf("CSINodeInfo update failed: %v", retryErr) + if err != nil { + return fmt.Errorf("error updating CSINodeInfo: %v; caused by: %v", err, utilerrors.NewAggregate(updateErrs)) } return nil } +func (nim *nodeInfoManager) tryUpdateCSINodeInfo( + csiKubeClient csiclientset.Interface, + driverName string, + driverNodeID string, + topology *csipb.Topology) error { + + nodeInfo, err := csiKubeClient.CsiV1alpha1().CSINodeInfos().Get(string(nim.nodeName), metav1.GetOptions{}) + if nodeInfo == nil || errors.IsNotFound(err) { + return nim.createNodeInfoObject(driverName, driverNodeID, topology) + } + if err != nil { + return err // do not wrap error + } + + return nim.updateNodeInfoObject(nodeInfo, driverName, driverNodeID, topology) +} + func (nim *nodeInfoManager) createNodeInfoObject( driverName string, driverNodeID string, @@ -446,51 +477,62 @@ func (nim *nodeInfoManager) updateNodeInfoObject( return err // do not wrap error } -func (nim *nodeInfoManager) removeCSINodeInfo(csiDriverName string) error { - retryErr := retry.RetryOnConflict(retry.DefaultRetry, func() error { +func (nim *nodeInfoManager) removeCSINodeInfo( + csiDriverName string) error { - csiKubeClient := nim.volumeHost.GetCSIClient() - if csiKubeClient == nil { - return fmt.Errorf("error getting CSI client") - } + csiKubeClient := nim.volumeHost.GetCSIClient() + if csiKubeClient == nil { + return fmt.Errorf("error getting CSI client") + } - nodeInfoClient := csiKubeClient.CsiV1alpha1().CSINodeInfos() - nodeInfo, err := nodeInfoClient.Get(string(nim.nodeName), metav1.GetOptions{}) - if nodeInfo == nil || errors.IsNotFound(err) { - // do nothing - 
return nil - } - if err != nil { - return err // do not wrap error + var updateErrs []error + err := wait.ExponentialBackoff(updateBackoff, func() (bool, error) { + if err := nim.tryRemoveCSINodeInfo(csiKubeClient, csiDriverName); err != nil { + updateErrs = append(updateErrs, err) + return false, nil } + return true, nil + }) + if err != nil { + return fmt.Errorf("error updating CSINodeInfo: %v; caused by: %v", err, utilerrors.NewAggregate(updateErrs)) + } + return nil +} - // Remove matching driver from driver list - var newDriverInfos []csiv1alpha1.CSIDriverInfo - for _, driverInfo := range nodeInfo.CSIDrivers { - if driverInfo.Driver != csiDriverName { - newDriverInfos = append(newDriverInfos, driverInfo) - } - } +func (nim *nodeInfoManager) tryRemoveCSINodeInfo( + csiKubeClient csiclientset.Interface, csiDriverName string) error { - if len(newDriverInfos) == len(nodeInfo.CSIDrivers) { - // No changes, don't update - return nil - } + nodeInfoClient := csiKubeClient.CsiV1alpha1().CSINodeInfos() + nodeInfo, err := nodeInfoClient.Get(string(nim.nodeName), metav1.GetOptions{}) + if nodeInfo == nil || errors.IsNotFound(err) { + // do nothing + return nil + } + if err != nil { + return err // do not wrap error + } - if len(newDriverInfos) == 0 { - // No drivers left, delete CSINodeInfo object - return nodeInfoClient.Delete(string(nim.nodeName), &metav1.DeleteOptions{}) + // Remove matching driver from driver list + var newDriverInfos []csiv1alpha1.CSIDriverInfo + for _, driverInfo := range nodeInfo.CSIDrivers { + if driverInfo.Driver != csiDriverName { + newDriverInfos = append(newDriverInfos, driverInfo) } + } - // TODO (verult) make sure CSINodeInfo has validation logic to prevent duplicate driver names - _, updateErr := nodeInfoClient.Update(nodeInfo) - return updateErr // do not wrap error + if len(newDriverInfos) == len(nodeInfo.CSIDrivers) { + // No changes, don't update + return nil + } - }) - if retryErr != nil { - return fmt.Errorf("CSINodeInfo update 
failed: %v", retryErr) + if len(newDriverInfos) == 0 { + // No drivers left, delete CSINodeInfo object + return nodeInfoClient.Delete(string(nim.nodeName), &metav1.DeleteOptions{}) } - return nil + + // TODO (verult) make sure CSINodeInfo has validation logic to prevent duplicate driver names + _, updateErr := nodeInfoClient.Update(nodeInfo) + return updateErr // do not wrap error } func updateMaxAttachLimit(driverName string, maxLimit int64) nodeUpdateFunc { From b5f9d594db5b9c23f927bd4626e84214dd9d7b3d Mon Sep 17 00:00:00 2001 From: yankaiz Date: Wed, 6 Mar 2019 16:21:11 -0800 Subject: [PATCH 23/96] Bump debian-iptables to v11.0.1. Rebase docker image on debian-base:0.4.1 --- build/common.sh | 2 +- build/debian-base/Makefile | 2 +- build/debian-iptables/Makefile | 4 ++-- build/root/WORKSPACE | 4 ++-- 4 files changed, 6 insertions(+), 6 deletions(-) diff --git a/build/common.sh b/build/common.sh index 6ace6a06a564f..bbe91da83e54f 100755 --- a/build/common.sh +++ b/build/common.sh @@ -88,7 +88,7 @@ readonly KUBE_CONTAINER_RSYNC_PORT=8730 # # $1 - server architecture kube::build::get_docker_wrapped_binaries() { - debian_iptables_version=v10.2 + debian_iptables_version=v11.0.1 ### If you change any of these lists, please also update DOCKERIZED_BINARIES ### in build/BUILD. 
And kube::golang::server_image_targets case $1 in diff --git a/build/debian-base/Makefile b/build/debian-base/Makefile index 379fa49196268..97899cde16918 100755 --- a/build/debian-base/Makefile +++ b/build/debian-base/Makefile @@ -18,7 +18,7 @@ REGISTRY ?= staging-k8s.gcr.io IMAGE ?= $(REGISTRY)/debian-base BUILD_IMAGE ?= debian-build -TAG ?= 0.3.2 +TAG ?= 0.4.1 TAR_FILE ?= rootfs.tar ARCH?=amd64 diff --git a/build/debian-iptables/Makefile b/build/debian-iptables/Makefile index 8c793db5aac77..d112248068318 100644 --- a/build/debian-iptables/Makefile +++ b/build/debian-iptables/Makefile @@ -16,12 +16,12 @@ REGISTRY?="staging-k8s.gcr.io" IMAGE=$(REGISTRY)/debian-iptables -TAG?=v10.2 +TAG?=v11.0.1 ARCH?=amd64 ALL_ARCH = amd64 arm arm64 ppc64le s390x TEMP_DIR:=$(shell mktemp -d) -BASEIMAGE?=k8s.gcr.io/debian-base-$(ARCH):0.3.2 +BASEIMAGE?=k8s.gcr.io/debian-base-$(ARCH):0.4.1 # This option is for running docker manifest command export DOCKER_CLI_EXPERIMENTAL := enabled diff --git a/build/root/WORKSPACE b/build/root/WORKSPACE index 2188540bc4cc7..94fbcc457c67d 100644 --- a/build/root/WORKSPACE +++ b/build/root/WORKSPACE @@ -70,10 +70,10 @@ http_file( docker_pull( name = "debian-iptables-amd64", - digest = "sha256:0987db7ce42949d20ed2647a65d4bee0b616b4d40c7ea54769cc24b7ad003677", + digest = "sha256:9c41b4c326304b94eb96fdd2e181aa6e9995cc4642fcdfb570cedd73a419ba39", registry = "k8s.gcr.io", repository = "debian-iptables-amd64", - tag = "v10.2", # ignored, but kept here for documentation + tag = "v11.0.1", # ignored, but kept here for documentation ) docker_pull( From 1bc9f002c8a7d31c71cc43610bc578e6a1524839 Mon Sep 17 00:00:00 2001 From: Rohit Jaini Date: Mon, 4 Mar 2019 19:37:49 -0800 Subject: [PATCH 24/96] Adding a check to make sure UseInstanceMetadata flag is true to get data from metadata. 
--- .../providers/azure/azure_test.go | 3 +- .../providers/azure/azure_zones.go | 51 +++++++++++-------- 2 files changed, 32 insertions(+), 22 deletions(-) diff --git a/pkg/cloudprovider/providers/azure/azure_test.go b/pkg/cloudprovider/providers/azure/azure_test.go index e4e60bc6962f7..1d15df8a9d2f5 100644 --- a/pkg/cloudprovider/providers/azure/azure_test.go +++ b/pkg/cloudprovider/providers/azure/azure_test.go @@ -1677,7 +1677,8 @@ func validateEmptyConfig(t *testing.T, config string) { func TestGetZone(t *testing.T) { cloud := &Cloud{ Config: Config{ - Location: "eastus", + Location: "eastus", + UseInstanceMetadata: true, }, } testcases := []struct { diff --git a/pkg/cloudprovider/providers/azure/azure_zones.go b/pkg/cloudprovider/providers/azure/azure_zones.go index 23a0317eb960d..2ddddb7b64132 100644 --- a/pkg/cloudprovider/providers/azure/azure_zones.go +++ b/pkg/cloudprovider/providers/azure/azure_zones.go @@ -19,6 +19,7 @@ package azure import ( "context" "fmt" + "os" "strconv" "strings" @@ -49,31 +50,39 @@ func (az *Cloud) GetZoneID(zoneLabel string) string { // GetZone returns the Zone containing the current availability zone and locality region that the program is running in. // If the node is not running with availability zones, then it will fall back to fault domain. 
func (az *Cloud) GetZone(ctx context.Context) (cloudprovider.Zone, error) { - metadata, err := az.metadata.GetMetadata() - if err != nil { - return cloudprovider.Zone{}, err - } + if az.UseInstanceMetadata { + metadata, err := az.metadata.GetMetadata() + if err != nil { + return cloudprovider.Zone{}, err + } - if metadata.Compute == nil { - return cloudprovider.Zone{}, fmt.Errorf("failure of getting compute information from instance metadata") - } + if metadata.Compute == nil { + return cloudprovider.Zone{}, fmt.Errorf("failure of getting compute information from instance metadata") + } - zone := "" - if metadata.Compute.Zone != "" { - zoneID, err := strconv.Atoi(metadata.Compute.Zone) - if err != nil { - return cloudprovider.Zone{}, fmt.Errorf("failed to parse zone ID %q: %v", metadata.Compute.Zone, err) + zone := "" + if metadata.Compute.Zone != "" { + zoneID, err := strconv.Atoi(metadata.Compute.Zone) + if err != nil { + return cloudprovider.Zone{}, fmt.Errorf("failed to parse zone ID %q: %v", metadata.Compute.Zone, err) + } + zone = az.makeZone(zoneID) + } else { + glog.V(3).Infof("Availability zone is not enabled for the node, falling back to fault domain") + zone = metadata.Compute.FaultDomain } - zone = az.makeZone(zoneID) - } else { - glog.V(3).Infof("Availability zone is not enabled for the node, falling back to fault domain") - zone = metadata.Compute.FaultDomain - } - return cloudprovider.Zone{ - FailureDomain: zone, - Region: az.Location, - }, nil + return cloudprovider.Zone{ + FailureDomain: zone, + Region: az.Location, + }, nil + } + // if UseInstanceMetadata is false, get Zone name by calling ARM + hostname, err := os.Hostname() + if err != nil { + return cloudprovider.Zone{}, fmt.Errorf("failure getting hostname from kernel") + } + return az.vmSet.GetZoneByNodeName(strings.ToLower(hostname)) } // GetZoneByProviderID implements Zones.GetZoneByProviderID From 4768479a864ea3cb91aaaebf517041d7d1b025c3 Mon Sep 17 00:00:00 2001 From: David Zhu Date: Tue, 
26 Feb 2019 13:38:56 -0800 Subject: [PATCH 25/96] GetMountRefs fixed to handle corrupted mounts by treating it like an unmounted volume --- pkg/util/mount/mount_helper.go | 2 +- pkg/util/mount/mount_linux.go | 10 +++++++--- pkg/util/mount/mount_windows.go | 10 +++++++--- pkg/util/mount/nsenter_mount.go | 9 ++++----- 4 files changed, 19 insertions(+), 12 deletions(-) diff --git a/pkg/util/mount/mount_helper.go b/pkg/util/mount/mount_helper.go index 696843a70e14f..395c3f5ffa4b3 100644 --- a/pkg/util/mount/mount_helper.go +++ b/pkg/util/mount/mount_helper.go @@ -120,5 +120,5 @@ func IsCorruptedMnt(err error) bool { underlyingError = pe.Err } - return underlyingError == syscall.ENOTCONN || underlyingError == syscall.ESTALE || underlyingError == syscall.EIO + return underlyingError == syscall.ENOTCONN || underlyingError == syscall.ESTALE || underlyingError == syscall.EIO || underlyingError == syscall.EACCES } diff --git a/pkg/util/mount/mount_linux.go b/pkg/util/mount/mount_linux.go index 0cfb8261ae86f..bf35e8d55ee93 100644 --- a/pkg/util/mount/mount_linux.go +++ b/pkg/util/mount/mount_linux.go @@ -1003,10 +1003,14 @@ func (mounter *Mounter) SafeMakeDir(subdir string, base string, perm os.FileMode } func (mounter *Mounter) GetMountRefs(pathname string) ([]string, error) { - if _, err := os.Stat(pathname); os.IsNotExist(err) { + pathExists, pathErr := PathExists(pathname) + if !pathExists { return []string{}, nil - } else if err != nil { - return nil, err + } else if IsCorruptedMnt(pathErr) { + glog.Warningf("GetMountRefs found corrupted mount at %s, treating as unmounted path", pathname) + return []string{}, nil + } else if pathErr != nil { + return nil, fmt.Errorf("error checking path %s: %v", pathname, pathErr) } realpath, err := filepath.EvalSymlinks(pathname) if err != nil { diff --git a/pkg/util/mount/mount_windows.go b/pkg/util/mount/mount_windows.go index dffc8cafdaac6..a5335d7936e4c 100644 --- a/pkg/util/mount/mount_windows.go +++ 
b/pkg/util/mount/mount_windows.go @@ -496,10 +496,14 @@ func getAllParentLinks(path string) ([]string, error) { // GetMountRefs : empty implementation here since there is no place to query all mount points on Windows func (mounter *Mounter) GetMountRefs(pathname string) ([]string, error) { - if _, err := os.Stat(normalizeWindowsPath(pathname)); os.IsNotExist(err) { + pathExists, pathErr := PathExists(normalizeWindowsPath(pathname)) + // TODO(#75012): Need a Windows specific IsCorruptedMnt function that checks against whatever errno's + // Windows emits when we try to Stat a corrupted mount + // https://golang.org/pkg/syscall/?GOOS=windows&GOARCH=amd64#Errno + if !pathExists { return []string{}, nil - } else if err != nil { - return nil, err + } else if pathErr != nil { + return nil, fmt.Errorf("error checking path %s: %v", normalizeWindowsPath(pathname), pathErr) } return []string{pathname}, nil } diff --git a/pkg/util/mount/nsenter_mount.go b/pkg/util/mount/nsenter_mount.go index d627a8f1fe448..7b2e4a040c582 100644 --- a/pkg/util/mount/nsenter_mount.go +++ b/pkg/util/mount/nsenter_mount.go @@ -337,12 +337,11 @@ func (mounter *NsenterMounter) SafeMakeDir(subdir string, base string, perm os.F } func (mounter *NsenterMounter) GetMountRefs(pathname string) ([]string, error) { - exists, err := mounter.ExistsPath(pathname) - if err != nil { - return nil, err - } - if !exists { + pathExists, pathErr := PathExists(pathname) + if !pathExists || IsCorruptedMnt(pathErr) { return []string{}, nil + } else if pathErr != nil { + return nil, fmt.Errorf("Error checking path %s: %v", pathname, pathErr) } hostpath, err := mounter.ne.EvalSymlinks(pathname, true /* mustExist */) if err != nil { From 40b53216256fd3beb47cc8133afa1602dffe4dd9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Osipiuk?= Date: Fri, 8 Mar 2019 14:08:42 +0100 Subject: [PATCH 26/96] Update Cluster Autoscaler version to 1.12.3 --- cluster/gce/manifests/cluster-autoscaler.manifest | 2 +- 1 file changed, 1 
insertion(+), 1 deletion(-) diff --git a/cluster/gce/manifests/cluster-autoscaler.manifest b/cluster/gce/manifests/cluster-autoscaler.manifest index 0d2a0edefa00b..8b09dbc14d965 100644 --- a/cluster/gce/manifests/cluster-autoscaler.manifest +++ b/cluster/gce/manifests/cluster-autoscaler.manifest @@ -17,7 +17,7 @@ "containers": [ { "name": "cluster-autoscaler", - "image": "k8s.gcr.io/cluster-autoscaler:v1.12.2", + "image": "k8s.gcr.io/cluster-autoscaler:v1.12.3", "livenessProbe": { "httpGet": { "path": "/health-check", From 88a910e272f14fdc16db08db843292763a759361 Mon Sep 17 00:00:00 2001 From: Weibin Lin Date: Sat, 27 Oct 2018 15:28:26 +0800 Subject: [PATCH 27/96] add module 'nf_conntrack' in ipvs prerequisite check --- pkg/proxy/ipvs/README.md | 2 +- pkg/proxy/ipvs/proxier.go | 18 ++++++++++++++++-- 2 files changed, 17 insertions(+), 3 deletions(-) diff --git a/pkg/proxy/ipvs/README.md b/pkg/proxy/ipvs/README.md index fcdca80744928..9e8fa493fa486 100644 --- a/pkg/proxy/ipvs/README.md +++ b/pkg/proxy/ipvs/README.md @@ -259,7 +259,7 @@ ACCEPT all -- 0.0.0.0/0 0.0.0.0/0 match-set KUBE-EXT Currently, local-up scripts, GCE scripts and kubeadm support switching IPVS proxy mode via exporting environment variables or specifying flags. ### Prerequisite -Ensure IPVS required kernel modules +Ensure IPVS required kernel modules (**Notes**: use `nf_conntrack` instead of `nf_conntrack_ipv4` for Linux kernel 4.19 and later) ```shell ip_vs ip_vs_rr diff --git a/pkg/proxy/ipvs/proxier.go b/pkg/proxy/ipvs/proxier.go index 7061e9ea1f894..974da1d5bf9cb 100644 --- a/pkg/proxy/ipvs/proxier.go +++ b/pkg/proxy/ipvs/proxier.go @@ -164,6 +164,7 @@ var ipvsModules = []string{ "ip_vs_wrr", "ip_vs_sh", "nf_conntrack_ipv4", + "nf_conntrack", } // In IPVS proxy mode, the following flags need to be set @@ -511,8 +512,21 @@ func CanUseIPVSProxier(handle KernelHandler, ipsetver IPSetVersioner) (bool, err wantModules.Insert(ipvsModules...) loadModules.Insert(mods...) 
modules := wantModules.Difference(loadModules).UnsortedList() - if len(modules) != 0 { - return false, fmt.Errorf("IPVS proxier will not be used because the following required kernel modules are not loaded: %v", modules) + var missingMods []string + ConntrackiMissingCounter := 0 + for _, mod := range modules { + if strings.Contains(mod, "nf_conntrack") { + ConntrackiMissingCounter++ + } else { + missingMods = append(missingMods, mod) + } + } + if ConntrackiMissingCounter == 2 { + missingMods = append(missingMods, "nf_conntrack_ipv4(or nf_conntrack for Linux kernel 4.19 and later)") + } + + if len(missingMods) != 0 { + return false, fmt.Errorf("IPVS proxier will not be used because the following required kernel modules are not loaded: %v", missingMods) } // Check ipset version From 7b4dfe7c50503f087aaa7d9214b93ff9b9d22697 Mon Sep 17 00:00:00 2001 From: Pengfei Ni Date: Wed, 13 Mar 2019 10:29:41 +0800 Subject: [PATCH 28/96] Allow disable outbound snat when Azure standard load balancer is used --- pkg/cloudprovider/providers/azure/azure.go | 22 ++++++++++++++++--- .../providers/azure/azure_loadbalancer.go | 9 ++++---- .../providers/azure/azure_wrap.go | 8 +++++++ 3 files changed, 32 insertions(+), 7 deletions(-) diff --git a/pkg/cloudprovider/providers/azure/azure.go b/pkg/cloudprovider/providers/azure/azure.go index a1df0eb6b312e..6b3a679574209 100644 --- a/pkg/cloudprovider/providers/azure/azure.go +++ b/pkg/cloudprovider/providers/azure/azure.go @@ -70,6 +70,8 @@ const ( var ( // Master nodes are not added to standard load balancer by default. defaultExcludeMasterFromStandardLB = true + // Outbound SNAT is enabled by default. + defaultDisableOutboundSNAT = false ) // Azure implements PVLabeler. @@ -139,6 +141,9 @@ type Config struct { // ExcludeMasterFromStandardLB excludes master nodes from standard load balancer. // If not set, it will be default to true. 
ExcludeMasterFromStandardLB *bool `json:"excludeMasterFromStandardLB" yaml:"excludeMasterFromStandardLB"` + // DisableOutboundSNAT disables the outbound SNAT for public load balancer rules. + // It should only be set when loadBalancerSku is standard. If not set, it will be default to false. + DisableOutboundSNAT *bool `json:"disableOutboundSNAT" yaml:"disableOutboundSNAT"` // Maximum allowed LoadBalancer Rule Count is the limit enforced by Azure Load balancer MaximumLoadBalancerRuleCount int `json:"maximumLoadBalancerRuleCount" yaml:"maximumLoadBalancerRuleCount"` @@ -265,9 +270,20 @@ func NewCloud(configReader io.Reader) (cloudprovider.Interface, error) { config.CloudProviderRateLimitBucketWrite) } - // Do not add master nodes to standard LB by default. - if config.ExcludeMasterFromStandardLB == nil { - config.ExcludeMasterFromStandardLB = &defaultExcludeMasterFromStandardLB + if strings.EqualFold(config.LoadBalancerSku, loadBalancerSkuStandard) { + // Do not add master nodes to standard LB by default. + if config.ExcludeMasterFromStandardLB == nil { + config.ExcludeMasterFromStandardLB = &defaultExcludeMasterFromStandardLB + } + + // Enable outbound SNAT by default. 
+ if config.DisableOutboundSNAT == nil { + config.DisableOutboundSNAT = &defaultDisableOutboundSNAT + } + } else { + if config.DisableOutboundSNAT != nil && *config.DisableOutboundSNAT { + return nil, fmt.Errorf("disableOutboundSNAT should only set when loadBalancerSku is standard") + } } azClientConfig := &azClientConfig{ diff --git a/pkg/cloudprovider/providers/azure/azure_loadbalancer.go b/pkg/cloudprovider/providers/azure/azure_loadbalancer.go index 74394b4e4b73a..580c030d61370 100644 --- a/pkg/cloudprovider/providers/azure/azure_loadbalancer.go +++ b/pkg/cloudprovider/providers/azure/azure_loadbalancer.go @@ -939,10 +939,11 @@ func (az *Cloud) reconcileLoadBalancerRule( BackendAddressPool: &network.SubResource{ ID: to.StringPtr(lbBackendPoolID), }, - LoadDistribution: loadDistribution, - FrontendPort: to.Int32Ptr(port.Port), - BackendPort: to.Int32Ptr(port.Port), - EnableFloatingIP: to.BoolPtr(true), + LoadDistribution: loadDistribution, + FrontendPort: to.Int32Ptr(port.Port), + BackendPort: to.Int32Ptr(port.Port), + EnableFloatingIP: to.BoolPtr(true), + DisableOutboundSnat: to.BoolPtr(az.disableLoadBalancerOutboundSNAT()), }, } if protocol == v1.ProtocolTCP { diff --git a/pkg/cloudprovider/providers/azure/azure_wrap.go b/pkg/cloudprovider/providers/azure/azure_wrap.go index d64ff76d12dae..ab312dffaead1 100644 --- a/pkg/cloudprovider/providers/azure/azure_wrap.go +++ b/pkg/cloudprovider/providers/azure/azure_wrap.go @@ -286,6 +286,14 @@ func (az *Cloud) excludeMasterNodesFromStandardLB() bool { return az.ExcludeMasterFromStandardLB != nil && *az.ExcludeMasterFromStandardLB } +func (az *Cloud) disableLoadBalancerOutboundSNAT() bool { + if !az.useStandardLoadBalancer() || az.DisableOutboundSNAT == nil { + return false + } + + return *az.DisableOutboundSNAT +} + // IsNodeUnmanaged returns true if the node is not managed by Azure cloud provider. // Those nodes includes on-prem or VMs from other clouds. They will not be added to load balancer // backends. 
Azure routes and managed disks are also not supported for them. From a8b8ba9e3afe03a229dbee6b554e6d52f576fc8d Mon Sep 17 00:00:00 2001 From: Pengfei Ni Date: Mon, 11 Mar 2019 16:40:23 +0800 Subject: [PATCH 29/96] Ensure Azure load balancer cleaned up on 404 or 403 --- .../providers/azure/azure_loadbalancer.go | 32 ++++++++-- .../azure/azure_loadbalancer_test.go | 60 +++++++++++++++++++ .../providers/azure/azure_test.go | 7 +++ .../providers/azure/azure_wrap.go | 13 ++++ 4 files changed, 106 insertions(+), 6 deletions(-) diff --git a/pkg/cloudprovider/providers/azure/azure_loadbalancer.go b/pkg/cloudprovider/providers/azure/azure_loadbalancer.go index 74394b4e4b73a..d679e71dae60f 100644 --- a/pkg/cloudprovider/providers/azure/azure_loadbalancer.go +++ b/pkg/cloudprovider/providers/azure/azure_loadbalancer.go @@ -169,27 +169,47 @@ func (az *Cloud) UpdateLoadBalancer(ctx context.Context, clusterName string, ser func (az *Cloud) EnsureLoadBalancerDeleted(ctx context.Context, clusterName string, service *v1.Service) error { isInternal := requiresInternalLoadBalancer(service) serviceName := getServiceName(service) - glog.V(5).Infof("delete(%s): START clusterName=%q", serviceName, clusterName) + glog.V(5).Infof("Delete service (%s): START clusterName=%q", serviceName, clusterName) + + ignoreErrors := func(err error) error { + if ignoreStatusNotFoundFromError(err) == nil { + glog.V(5).Infof("EnsureLoadBalancerDeleted: ignoring StatusNotFound error because the resource doesn't exist (%v)", err) + return nil + } + + if ignoreStatusForbiddenFromError(err) == nil { + glog.V(5).Infof("EnsureLoadBalancerDeleted: ignoring StatusForbidden error (%v). 
This may be caused by wrong configuration via service annotations", err) + return nil + } + + return err + } serviceIPToCleanup, err := az.findServiceIPAddress(ctx, clusterName, service, isInternal) - if err != nil { + if ignoreErrors(err) != nil { return err } glog.V(2).Infof("EnsureLoadBalancerDeleted: reconciling security group for service %q with IP %q, wantLb = false", serviceName, serviceIPToCleanup) if _, err := az.reconcileSecurityGroup(clusterName, service, &serviceIPToCleanup, false /* wantLb */); err != nil { - return err + if ignoreErrors(err) != nil { + return err + } } if _, err := az.reconcileLoadBalancer(clusterName, service, nil, false /* wantLb */); err != nil { - return err + if ignoreErrors(err) != nil { + return err + } } if _, err := az.reconcilePublicIP(clusterName, service, nil, false /* wantLb */); err != nil { - return err + if ignoreErrors(err) != nil { + return err + } } - glog.V(2).Infof("delete(%s): FINISH", serviceName) + glog.V(2).Infof("Delete service (%s): FINISH", serviceName) return nil } diff --git a/pkg/cloudprovider/providers/azure/azure_loadbalancer_test.go b/pkg/cloudprovider/providers/azure/azure_loadbalancer_test.go index f11742e5f39df..e0ff9cc3a607d 100644 --- a/pkg/cloudprovider/providers/azure/azure_loadbalancer_test.go +++ b/pkg/cloudprovider/providers/azure/azure_loadbalancer_test.go @@ -17,6 +17,7 @@ limitations under the License. 
package azure import ( + "context" "fmt" "reflect" "testing" @@ -243,3 +244,62 @@ func TestGetIdleTimeout(t *testing.T) { }) } } + +func TestEnsureLoadBalancerDeleted(t *testing.T) { + const vmCount = 8 + const availabilitySetCount = 4 + const serviceCount = 9 + + tests := []struct { + desc string + service v1.Service + expectCreateError bool + }{ + { + desc: "external service should be created and deleted successfully", + service: getTestService("test1", v1.ProtocolTCP, 80), + }, + { + desc: "internal service should be created and deleted successfully", + service: getInternalTestService("test2", 80), + }, + { + desc: "annotated service with same resourceGroup should be created and deleted successfully", + service: getResourceGroupTestService("test3", "rg", "", 80), + }, + { + desc: "annotated service with different resourceGroup shouldn't be created but should be deleted successfully", + service: getResourceGroupTestService("test4", "random-rg", "1.2.3.4", 80), + expectCreateError: true, + }, + } + + az := getTestCloud() + for i, c := range tests { + clusterResources := getClusterResources(az, vmCount, availabilitySetCount) + getTestSecurityGroup(az) + if c.service.Annotations[ServiceAnnotationLoadBalancerInternal] == "true" { + addTestSubnet(t, az, &c.service) + } + + // create the service first. + lbStatus, err := az.EnsureLoadBalancer(context.TODO(), testClusterName, &c.service, clusterResources.nodes) + if c.expectCreateError { + assert.NotNil(t, err, "TestCase[%d]: %s", i, c.desc) + } else { + assert.Nil(t, err, "TestCase[%d]: %s", i, c.desc) + assert.NotNil(t, lbStatus, "TestCase[%d]: %s", i, c.desc) + result, err := az.LoadBalancerClient.List(context.TODO(), az.Config.ResourceGroup) + assert.Nil(t, err, "TestCase[%d]: %s", i, c.desc) + assert.Equal(t, len(result), 1, "TestCase[%d]: %s", i, c.desc) + assert.Equal(t, len(*result[0].LoadBalancingRules), 1, "TestCase[%d]: %s", i, c.desc) + } + + // finally, delete it. 
+ err = az.EnsureLoadBalancerDeleted(context.TODO(), testClusterName, &c.service) + assert.Nil(t, err, "TestCase[%d]: %s", i, c.desc) + result, err := az.LoadBalancerClient.List(context.Background(), az.Config.ResourceGroup) + assert.Nil(t, err, "TestCase[%d]: %s", i, c.desc) + assert.Equal(t, len(result), 0, "TestCase[%d]: %s", i, c.desc) + } +} diff --git a/pkg/cloudprovider/providers/azure/azure_test.go b/pkg/cloudprovider/providers/azure/azure_test.go index 326677f533c98..876749b7af55c 100644 --- a/pkg/cloudprovider/providers/azure/azure_test.go +++ b/pkg/cloudprovider/providers/azure/azure_test.go @@ -1138,6 +1138,13 @@ func getInternalTestService(identifier string, requestedPorts ...int32) v1.Servi return svc } +func getResourceGroupTestService(identifier, resourceGroup, loadBalancerIP string, requestedPorts ...int32) v1.Service { + svc := getTestService(identifier, v1.ProtocolTCP, requestedPorts...) + svc.Spec.LoadBalancerIP = loadBalancerIP + svc.Annotations[ServiceAnnotationLoadBalancerResourceGroup] = resourceGroup + return svc +} + func setLoadBalancerModeAnnotation(service *v1.Service, lbMode string) { service.Annotations[ServiceAnnotationLoadBalancerMode] = lbMode } diff --git a/pkg/cloudprovider/providers/azure/azure_wrap.go b/pkg/cloudprovider/providers/azure/azure_wrap.go index d64ff76d12dae..f44aea78cc399 100644 --- a/pkg/cloudprovider/providers/azure/azure_wrap.go +++ b/pkg/cloudprovider/providers/azure/azure_wrap.go @@ -70,6 +70,19 @@ func ignoreStatusNotFoundFromError(err error) error { return err } +// ignoreStatusForbiddenFromError returns nil if the status code is StatusForbidden. +// This happens when AuthorizationFailed is reported from Azure API. 
+func ignoreStatusForbiddenFromError(err error) error { + if err == nil { + return nil + } + v, ok := err.(autorest.DetailedError) + if ok && v.StatusCode == http.StatusForbidden { + return nil + } + return err +} + /// getVirtualMachine calls 'VirtualMachinesClient.Get' with a timed cache /// The service side has throttling control that delays responses if there're multiple requests onto certain vm /// resource request in short period. From cc4791abeb64587645d481d9a260b0f4475b9084 Mon Sep 17 00:00:00 2001 From: andyzhangx Date: Thu, 7 Mar 2019 05:38:06 +0000 Subject: [PATCH 30/96] fix smb unmount issue on Windows fix log warning use IsCorruptedMnt in GetMountRefs on Windows use errorno in IsCorruptedMnt check fix comments: add more error code add more error no checking change year fix comments fix bazel error fix bazel fix bazel fix bazel revert bazel change --- pkg/util/mount/BUILD | 4 +- ...mount_helper.go => mount_helper_common.go} | 21 ------ pkg/util/mount/mount_helper_unix.go | 44 ++++++++++++ pkg/util/mount/mount_helper_windows.go | 68 +++++++++++++++++++ pkg/util/mount/mount_windows.go | 11 +-- 5 files changed, 121 insertions(+), 27 deletions(-) rename pkg/util/mount/{mount_helper.go => mount_helper_common.go} (85%) create mode 100644 pkg/util/mount/mount_helper_unix.go create mode 100644 pkg/util/mount/mount_helper_windows.go diff --git a/pkg/util/mount/BUILD b/pkg/util/mount/BUILD index a199f6d32b760..30369398f40fc 100644 --- a/pkg/util/mount/BUILD +++ b/pkg/util/mount/BUILD @@ -9,7 +9,9 @@ go_library( "exec_mount_unsupported.go", "fake.go", "mount.go", - "mount_helper.go", + "mount_helper_common.go", + "mount_helper_unix.go", + "mount_helper_windows.go", "mount_linux.go", "mount_unsupported.go", "mount_windows.go", diff --git a/pkg/util/mount/mount_helper.go b/pkg/util/mount/mount_helper_common.go similarity index 85% rename from pkg/util/mount/mount_helper.go rename to pkg/util/mount/mount_helper_common.go index 395c3f5ffa4b3..942b6f05b9fb9 100644 --- 
a/pkg/util/mount/mount_helper.go +++ b/pkg/util/mount/mount_helper_common.go @@ -19,7 +19,6 @@ package mount import ( "fmt" "os" - "syscall" "github.com/golang/glog" ) @@ -102,23 +101,3 @@ func PathExists(path string) (bool, error) { return false, err } } - -// IsCorruptedMnt return true if err is about corrupted mount point -func IsCorruptedMnt(err error) bool { - if err == nil { - return false - } - var underlyingError error - switch pe := err.(type) { - case nil: - return false - case *os.PathError: - underlyingError = pe.Err - case *os.LinkError: - underlyingError = pe.Err - case *os.SyscallError: - underlyingError = pe.Err - } - - return underlyingError == syscall.ENOTCONN || underlyingError == syscall.ESTALE || underlyingError == syscall.EIO || underlyingError == syscall.EACCES -} diff --git a/pkg/util/mount/mount_helper_unix.go b/pkg/util/mount/mount_helper_unix.go new file mode 100644 index 0000000000000..880a89e159644 --- /dev/null +++ b/pkg/util/mount/mount_helper_unix.go @@ -0,0 +1,44 @@ +// +build !windows + +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package mount + +import ( + "os" + "syscall" +) + +// IsCorruptedMnt return true if err is about corrupted mount point +func IsCorruptedMnt(err error) bool { + if err == nil { + return false + } + var underlyingError error + switch pe := err.(type) { + case nil: + return false + case *os.PathError: + underlyingError = pe.Err + case *os.LinkError: + underlyingError = pe.Err + case *os.SyscallError: + underlyingError = pe.Err + } + + return underlyingError == syscall.ENOTCONN || underlyingError == syscall.ESTALE || underlyingError == syscall.EIO || underlyingError == syscall.EACCES +} diff --git a/pkg/util/mount/mount_helper_windows.go b/pkg/util/mount/mount_helper_windows.go new file mode 100644 index 0000000000000..64a94727be9e4 --- /dev/null +++ b/pkg/util/mount/mount_helper_windows.go @@ -0,0 +1,68 @@ +// +build windows + +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package mount + +import ( + "os" + "syscall" + + "github.com/golang/glog" +) + +// following failure codes are from https://docs.microsoft.com/en-us/windows/desktop/debug/system-error-codes--1300-1699- +// ERROR_BAD_NETPATH = 53 +// ERROR_NETWORK_BUSY = 54 +// ERROR_UNEXP_NET_ERR = 59 +// ERROR_NETNAME_DELETED = 64 +// ERROR_NETWORK_ACCESS_DENIED = 65 +// ERROR_BAD_DEV_TYPE = 66 +// ERROR_BAD_NET_NAME = 67 +// ERROR_SESSION_CREDENTIAL_CONFLICT = 1219 +// ERROR_LOGON_FAILURE = 1326 +var errorNoList = [...]int{53, 54, 59, 64, 65, 66, 67, 1219, 1326} + +// IsCorruptedMnt return true if err is about corrupted mount point +func IsCorruptedMnt(err error) bool { + if err == nil { + return false + } + + var underlyingError error + switch pe := err.(type) { + case nil: + return false + case *os.PathError: + underlyingError = pe.Err + case *os.LinkError: + underlyingError = pe.Err + case *os.SyscallError: + underlyingError = pe.Err + } + + if ee, ok := underlyingError.(syscall.Errno); ok { + for _, errno := range errorNoList { + if int(ee) == errno { + glog.Warningf("IsCorruptedMnt failed with error: %v, error code: %v", err, errno) + return true + } + } + } + + return false +} diff --git a/pkg/util/mount/mount_windows.go b/pkg/util/mount/mount_windows.go index a5335d7936e4c..383b239bbf544 100644 --- a/pkg/util/mount/mount_windows.go +++ b/pkg/util/mount/mount_windows.go @@ -496,14 +496,15 @@ func getAllParentLinks(path string) ([]string, error) { // GetMountRefs : empty implementation here since there is no place to query all mount points on Windows func (mounter *Mounter) GetMountRefs(pathname string) ([]string, error) { - pathExists, pathErr := PathExists(normalizeWindowsPath(pathname)) - // TODO(#75012): Need a Windows specific IsCorruptedMnt function that checks against whatever errno's - // Windows emits when we try to Stat a corrupted mount - // https://golang.org/pkg/syscall/?GOOS=windows&GOARCH=amd64#Errno + windowsPath := normalizeWindowsPath(pathname) + 
pathExists, pathErr := PathExists(windowsPath) if !pathExists { return []string{}, nil + } else if IsCorruptedMnt(pathErr) { + glog.Warningf("GetMountRefs found corrupted mount at %s, treating as unmounted path", windowsPath) + return []string{}, nil } else if pathErr != nil { - return nil, fmt.Errorf("error checking path %s: %v", normalizeWindowsPath(pathname), pathErr) + return nil, fmt.Errorf("error checking path %s: %v", windowsPath, pathErr) } return []string{pathname}, nil } From aa3b8c3363dc7d59cd26f0236dfc5d44cb440472 Mon Sep 17 00:00:00 2001 From: Wei Huang Date: Thu, 7 Mar 2019 11:53:44 -0800 Subject: [PATCH 31/96] kubelet: updated logic of verifying a static critical pod - check if a pod is static by its static pod info - meanwhile, check if a pod is critical by its corresponding mirror pod info --- pkg/kubelet/eviction/eviction_manager.go | 20 ++- pkg/kubelet/eviction/eviction_manager_test.go | 6 + pkg/kubelet/eviction/types.go | 4 + pkg/kubelet/kubelet.go | 2 +- pkg/kubelet/kubelet_test.go | 2 +- pkg/kubelet/runonce_test.go | 3 +- test/e2e_node/BUILD | 2 + test/e2e_node/system_node_critical_test.go | 137 ++++++++++++++++++ 8 files changed, 170 insertions(+), 6 deletions(-) create mode 100644 test/e2e_node/system_node_critical_test.go diff --git a/pkg/kubelet/eviction/eviction_manager.go b/pkg/kubelet/eviction/eviction_manager.go index 2b6fe919b2612..be2c2fcfacab2 100644 --- a/pkg/kubelet/eviction/eviction_manager.go +++ b/pkg/kubelet/eviction/eviction_manager.go @@ -57,6 +57,8 @@ type managerImpl struct { config Config // the function to invoke to kill a pod killPodFunc KillPodFunc + // the function to get the mirror pod by a given statid pod + mirrorPodFunc MirrorPodFunc // the interface that knows how to do image gc imageGC ImageGC // the interface that knows how to do container gc @@ -99,6 +101,7 @@ func NewManager( summaryProvider stats.SummaryProvider, config Config, killPodFunc KillPodFunc, + mirrorPodFunc MirrorPodFunc, imageGC ImageGC, 
containerGC ContainerGC, recorder record.EventRecorder, @@ -108,6 +111,7 @@ func NewManager( manager := &managerImpl{ clock: clock, killPodFunc: killPodFunc, + mirrorPodFunc: mirrorPodFunc, imageGC: imageGC, containerGC: containerGC, config: config, @@ -544,9 +548,19 @@ func (m *managerImpl) evictPod(pod *v1.Pod, gracePeriodOverride int64, evictMsg // If the pod is marked as critical and static, and support for critical pod annotations is enabled, // do not evict such pods. Static pods are not re-admitted after evictions. // https://github.com/kubernetes/kubernetes/issues/40573 has more details. - if kubelettypes.IsCriticalPod(pod) && kubepod.IsStaticPod(pod) { - glog.Errorf("eviction manager: cannot evict a critical static pod %s", format.Pod(pod)) - return false + if kubepod.IsStaticPod(pod) { + // need mirrorPod to check its "priority" value; static pod doesn't carry it + if mirrorPod, ok := m.mirrorPodFunc(pod); ok && mirrorPod != nil { + // skip only when it's a static and critical pod + if kubelettypes.IsCriticalPod(mirrorPod) { + glog.Errorf("eviction manager: cannot evict a critical static pod %s", format.Pod(pod)) + return false + } + } else { + // we should never hit this + glog.Errorf("eviction manager: cannot get mirror pod from static pod %s, so cannot evict it", format.Pod(pod)) + return false + } } status := v1.PodStatus{ Phase: v1.PodFailed, diff --git a/pkg/kubelet/eviction/eviction_manager_test.go b/pkg/kubelet/eviction/eviction_manager_test.go index e8b4a4e36d33c..20704b2416f72 100644 --- a/pkg/kubelet/eviction/eviction_manager_test.go +++ b/pkg/kubelet/eviction/eviction_manager_test.go @@ -1165,6 +1165,11 @@ func TestCriticalPodsAreNotEvicted(t *testing.T) { activePodsFunc := func() []*v1.Pod { return pods } + mirrorPodFunc := func(staticPod *v1.Pod) (*v1.Pod, bool) { + mirrorPod := staticPod.DeepCopy() + mirrorPod.Annotations[kubelettypes.ConfigSourceAnnotationKey] = kubelettypes.ApiserverSource + return mirrorPod, true + } fakeClock := 
clock.NewFakeClock(time.Now()) podKiller := &mockPodKiller{} @@ -1199,6 +1204,7 @@ func TestCriticalPodsAreNotEvicted(t *testing.T) { manager := &managerImpl{ clock: fakeClock, killPodFunc: podKiller.killPodNow, + mirrorPodFunc: mirrorPodFunc, imageGC: diskGC, containerGC: diskGC, config: config, diff --git a/pkg/kubelet/eviction/types.go b/pkg/kubelet/eviction/types.go index d78e7e0695b2f..7256f4edb25a1 100644 --- a/pkg/kubelet/eviction/types.go +++ b/pkg/kubelet/eviction/types.go @@ -94,6 +94,10 @@ type ContainerGC interface { // gracePeriodOverride - the grace period override to use instead of what is on the pod spec type KillPodFunc func(pod *v1.Pod, status v1.PodStatus, gracePeriodOverride *int64) error +// MirrorPodFunc returns the mirror pod for the given static pod and +// whether it was known to the pod manager. +type MirrorPodFunc func(*v1.Pod) (*v1.Pod, bool) + // ActivePodsFunc returns pods bound to the kubelet that are active (i.e. non-terminal state) type ActivePodsFunc func() []*v1.Pod diff --git a/pkg/kubelet/kubelet.go b/pkg/kubelet/kubelet.go index 2888c1736a889..0d871f63dd344 100644 --- a/pkg/kubelet/kubelet.go +++ b/pkg/kubelet/kubelet.go @@ -821,7 +821,7 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration, klet.podKillingCh = make(chan *kubecontainer.PodPair, podKillingChannelCapacity) // setup eviction manager - evictionManager, evictionAdmitHandler := eviction.NewManager(klet.resourceAnalyzer, evictionConfig, killPodNow(klet.podWorkers, kubeDeps.Recorder), klet.imageManager, klet.containerGC, kubeDeps.Recorder, nodeRef, klet.clock) + evictionManager, evictionAdmitHandler := eviction.NewManager(klet.resourceAnalyzer, evictionConfig, killPodNow(klet.podWorkers, kubeDeps.Recorder), klet.podManager.GetMirrorPodByPod, klet.imageManager, klet.containerGC, kubeDeps.Recorder, nodeRef, klet.clock) klet.evictionManager = evictionManager klet.admitHandlers.AddPodAdmitHandler(evictionAdmitHandler) diff --git 
a/pkg/kubelet/kubelet_test.go b/pkg/kubelet/kubelet_test.go index 509a1f532fec3..cfbade0acbd2b 100644 --- a/pkg/kubelet/kubelet_test.go +++ b/pkg/kubelet/kubelet_test.go @@ -301,7 +301,7 @@ func newTestKubeletWithImageList( Namespace: "", } // setup eviction manager - evictionManager, evictionAdmitHandler := eviction.NewManager(kubelet.resourceAnalyzer, eviction.Config{}, killPodNow(kubelet.podWorkers, fakeRecorder), kubelet.imageManager, kubelet.containerGC, fakeRecorder, nodeRef, kubelet.clock) + evictionManager, evictionAdmitHandler := eviction.NewManager(kubelet.resourceAnalyzer, eviction.Config{}, killPodNow(kubelet.podWorkers, fakeRecorder), kubelet.podManager.GetMirrorPodByPod, kubelet.imageManager, kubelet.containerGC, fakeRecorder, nodeRef, kubelet.clock) kubelet.evictionManager = evictionManager kubelet.admitHandlers.AddPodAdmitHandler(evictionAdmitHandler) diff --git a/pkg/kubelet/runonce_test.go b/pkg/kubelet/runonce_test.go index 6047d664bc0d5..7bd35077abd58 100644 --- a/pkg/kubelet/runonce_test.go +++ b/pkg/kubelet/runonce_test.go @@ -120,7 +120,8 @@ func TestRunOnce(t *testing.T) { fakeKillPodFunc := func(pod *v1.Pod, podStatus v1.PodStatus, gracePeriodOverride *int64) error { return nil } - evictionManager, evictionAdmitHandler := eviction.NewManager(kb.resourceAnalyzer, eviction.Config{}, fakeKillPodFunc, nil, nil, kb.recorder, nodeRef, kb.clock) + fakeMirrodPodFunc := func(*v1.Pod) (*v1.Pod, bool) { return nil, false } + evictionManager, evictionAdmitHandler := eviction.NewManager(kb.resourceAnalyzer, eviction.Config{}, fakeKillPodFunc, fakeMirrodPodFunc, nil, nil, kb.recorder, nodeRef, kb.clock) kb.evictionManager = evictionManager kb.admitHandlers.AddPodAdmitHandler(evictionAdmitHandler) diff --git a/test/e2e_node/BUILD b/test/e2e_node/BUILD index db390f13d1aeb..9fe26cd129413 100644 --- a/test/e2e_node/BUILD +++ b/test/e2e_node/BUILD @@ -101,6 +101,7 @@ go_test( "runtime_conformance_test.go", "security_context_test.go", "summary_test.go", + 
"system_node_critical_test.go", "volume_manager_test.go", ], embed = [":go_default_library"], @@ -119,6 +120,7 @@ go_test( "//pkg/kubelet/cm/cpuset:go_default_library", "//pkg/kubelet/container:go_default_library", "//pkg/kubelet/eviction:go_default_library", + "//pkg/kubelet/eviction/api:go_default_library", "//pkg/kubelet/images:go_default_library", "//pkg/kubelet/kubeletconfig:go_default_library", "//pkg/kubelet/kubeletconfig/status:go_default_library", diff --git a/test/e2e_node/system_node_critical_test.go b/test/e2e_node/system_node_critical_test.go new file mode 100644 index 0000000000000..c4de2b499b632 --- /dev/null +++ b/test/e2e_node/system_node_critical_test.go @@ -0,0 +1,137 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package e2e_node + +import ( + "fmt" + "os" + "time" + + "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + "k8s.io/apimachinery/pkg/util/uuid" + kubeapi "k8s.io/kubernetes/pkg/apis/core" + kubeletconfig "k8s.io/kubernetes/pkg/kubelet/apis/config" + evictionapi "k8s.io/kubernetes/pkg/kubelet/eviction/api" + "k8s.io/kubernetes/test/e2e/framework" + + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" +) + +var _ = framework.KubeDescribe("SystemNodeCriticalPod [Slow] [Serial] [Disruptive] [NodeFeature:SystemNodeCriticalPod]", func() { + f := framework.NewDefaultFramework("system-node-critical-pod-test") + // this test only manipulates pods in kube-system + f.SkipNamespaceCreation = true + + Context("when create a system-node-critical pod", func() { + tempSetCurrentKubeletConfig(f, func(initialConfig *kubeletconfig.KubeletConfiguration) { + diskConsumed := resource.MustParse("200Mi") + summary := eventuallyGetSummary() + availableBytes := *(summary.Node.Fs.AvailableBytes) + initialConfig.EvictionHard = map[string]string{string(evictionapi.SignalNodeFsAvailable): fmt.Sprintf("%d", availableBytes-uint64(diskConsumed.Value()))} + initialConfig.EvictionMinimumReclaim = map[string]string{} + }) + + // Place the remainder of the test within a context so that the kubelet config is set before and after the test. + Context("", func() { + var staticPodName, mirrorPodName, podPath string + ns := kubeapi.NamespaceSystem + + BeforeEach(func() { + By("create a static system-node-critical pod") + staticPodName = "static-disk-hog-" + string(uuid.NewUUID()) + mirrorPodName = staticPodName + "-" + framework.TestContext.NodeName + podPath = framework.TestContext.KubeletConfig.StaticPodPath + // define a static pod consuming disk gradually + // the upper limit is 1024 (iterations) * 10485760 bytes (10MB) = 10GB + err := createStaticSystemNodeCriticalPod( + podPath, staticPodName, ns, busyboxImage, v1.RestartPolicyNever, 1024, + "dd if=/dev/urandom of=file${i} bs=10485760 count=1 2>/dev/null; sleep .1;", + ) + Expect(err).ShouldNot(HaveOccurred()) + + By("wait for the mirror pod to be running") + Eventually(func() error { + return checkMirrorPodRunning(f.ClientSet, mirrorPodName, ns) + }, time.Minute, time.Second*2).Should(BeNil()) + }) + + It("should not be evicted upon DiskPressure", func() { + By("wait for the node to have DiskPressure condition") + 
Eventually(func() error { + if hasNodeCondition(f, v1.NodeDiskPressure) { + return nil + } + msg := fmt.Sprintf("NodeCondition: %s not encountered yet", v1.NodeDiskPressure) + framework.Logf(msg) + return fmt.Errorf(msg) + }, time.Minute*2, time.Second*4).Should(BeNil()) + + By("check if it's running all the time") + Consistently(func() error { + err := checkMirrorPodRunning(f.ClientSet, mirrorPodName, ns) + if err == nil { + framework.Logf("mirror pod %q is running", mirrorPodName) + } else { + framework.Logf(err.Error()) + } + return err + }, time.Minute*8, time.Second*4).ShouldNot(HaveOccurred()) + }) + AfterEach(func() { + By("delete the static pod") + err := deleteStaticPod(podPath, staticPodName, ns) + Expect(err).ShouldNot(HaveOccurred()) + + By("wait for the mirror pod to disappear") + Eventually(func() error { + return checkMirrorPodDisappear(f.ClientSet, mirrorPodName, ns) + }, time.Minute, time.Second*2).Should(BeNil()) + }) + }) + }) +}) + +func createStaticSystemNodeCriticalPod(dir, name, namespace, image string, restart v1.RestartPolicy, + iterations int, command string) error { + template := ` +apiVersion: v1 +kind: Pod +metadata: + name: %s + namespace: %s +spec: + priorityClassName: system-node-critical + containers: + - name: %s + image: %s + restartPolicy: %s + command: ["sh", "-c", "i=0; while [ $i -lt %d ]; do %s i=$(($i+1)); done; while true; do sleep 5; done"] +` + file := staticPodPath(dir, name, namespace) + podYaml := fmt.Sprintf(template, name, namespace, name, image, string(restart), iterations, command) + + f, err := os.OpenFile(file, os.O_RDWR|os.O_TRUNC|os.O_CREATE, 0666) + if err != nil { + return err + } + defer f.Close() + + _, err = f.WriteString(podYaml) + return err +} From 6ec96f39153cea739308a38955e4c953e34d5508 Mon Sep 17 00:00:00 2001 From: yankaiz Date: Wed, 6 Mar 2019 14:23:52 -0800 Subject: [PATCH 32/96] Allow session affinity a period of time to setup for new services. 
This is to deal with the flaky session affinity test. --- test/e2e/framework/service_util.go | 22 ++++++++-------------- test/e2e/network/service.go | 12 ++++++------ 2 files changed, 14 insertions(+), 20 deletions(-) diff --git a/test/e2e/framework/service_util.go b/test/e2e/framework/service_util.go index 94c7b4871ebd7..01b6e90201dc1 100644 --- a/test/e2e/framework/service_util.go +++ b/test/e2e/framework/service_util.go @@ -1464,6 +1464,7 @@ type affinityTracker struct { // Record the response going to a given host. func (at *affinityTracker) recordHost(host string) { at.hostTrace = append(at.hostTrace, host) + Logf("Received response from host: %s", host) } // Check that we got a constant count requests going to the same host. @@ -1491,13 +1492,11 @@ func checkAffinityFailed(tracker affinityTracker, err string) { } // CheckAffinity function tests whether the service affinity works as expected. -// If affinity is expected and transitionState is true, the test will -// return true once affinityConfirmCount number of same response observed in a -// row. If affinity is not expected, the test will keep observe until different -// responses observed. The function will return false only when no expected -// responses observed before timeout. If transitionState is false, the test will -// fail once different host is given if shouldHold is true. -func CheckAffinity(jig *ServiceTestJig, execPod *v1.Pod, targetIp string, targetPort int, shouldHold, transitionState bool) bool { +// If affinity is expected, the test will return true once affinityConfirmCount +// number of same response observed in a row. If affinity is not expected, the +// test will keep observe until different responses observed. The function will +// return false only in case of unexpected errors. 
+func CheckAffinity(jig *ServiceTestJig, execPod *v1.Pod, targetIp string, targetPort int, shouldHold bool) bool { targetIpPort := net.JoinHostPort(targetIp, strconv.Itoa(targetPort)) cmd := fmt.Sprintf(`wget -qO- http://%s/ -T 2`, targetIpPort) timeout := ServiceTestTimeout @@ -1521,13 +1520,8 @@ func CheckAffinity(jig *ServiceTestJig, execPod *v1.Pod, targetIp string, target if !shouldHold && !affinityHolds { return true, nil } - if shouldHold { - if !transitionState && !affinityHolds { - return true, fmt.Errorf("Affintity should hold but didn't.") - } - if trackerFulfilled && affinityHolds { - return true, nil - } + if shouldHold && trackerFulfilled && affinityHolds { + return true, nil } return false, nil }); pollErr != nil { diff --git a/test/e2e/network/service.go b/test/e2e/network/service.go index 2ae0862ca0d78..441a00bc59e6d 100644 --- a/test/e2e/network/service.go +++ b/test/e2e/network/service.go @@ -2068,17 +2068,17 @@ func execAffinityTestForNonLBService(f *framework.Framework, cs clientset.Interf Expect(err).NotTo(HaveOccurred()) if !isTransitionTest { - Expect(framework.CheckAffinity(jig, execPod, svcIp, servicePort, true, false)).To(BeTrue()) + Expect(framework.CheckAffinity(jig, execPod, svcIp, servicePort, true)).To(BeTrue()) } if isTransitionTest { svc = jig.UpdateServiceOrFail(svc.Namespace, svc.Name, func(svc *v1.Service) { svc.Spec.SessionAffinity = v1.ServiceAffinityNone }) - Expect(framework.CheckAffinity(jig, execPod, svcIp, servicePort, false, true)).To(BeTrue()) + Expect(framework.CheckAffinity(jig, execPod, svcIp, servicePort, false)).To(BeTrue()) svc = jig.UpdateServiceOrFail(svc.Namespace, svc.Name, func(svc *v1.Service) { svc.Spec.SessionAffinity = v1.ServiceAffinityClientIP }) - Expect(framework.CheckAffinity(jig, execPod, svcIp, servicePort, true, true)).To(BeTrue()) + Expect(framework.CheckAffinity(jig, execPod, svcIp, servicePort, true)).To(BeTrue()) } } @@ -2106,16 +2106,16 @@ func execAffinityTestForLBService(f 
*framework.Framework, cs clientset.Interface port := int(svc.Spec.Ports[0].Port) if !isTransitionTest { - Expect(framework.CheckAffinity(jig, nil, ingressIP, port, true, false)).To(BeTrue()) + Expect(framework.CheckAffinity(jig, nil, ingressIP, port, true)).To(BeTrue()) } if isTransitionTest { svc = jig.UpdateServiceOrFail(svc.Namespace, svc.Name, func(svc *v1.Service) { svc.Spec.SessionAffinity = v1.ServiceAffinityNone }) - Expect(framework.CheckAffinity(jig, nil, ingressIP, port, false, true)).To(BeTrue()) + Expect(framework.CheckAffinity(jig, nil, ingressIP, port, false)).To(BeTrue()) svc = jig.UpdateServiceOrFail(svc.Namespace, svc.Name, func(svc *v1.Service) { svc.Spec.SessionAffinity = v1.ServiceAffinityClientIP }) - Expect(framework.CheckAffinity(jig, nil, ingressIP, port, true, true)).To(BeTrue()) + Expect(framework.CheckAffinity(jig, nil, ingressIP, port, true)).To(BeTrue()) } } From f071c20d0d5ae39dbfd07adec5b9db2532d8d8ef Mon Sep 17 00:00:00 2001 From: Jordan Liggitt Date: Mon, 18 Mar 2019 17:16:49 -0400 Subject: [PATCH 33/96] Restore username and password kubectl flags --- pkg/kubectl/cmd/cmd.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/kubectl/cmd/cmd.go b/pkg/kubectl/cmd/cmd.go index b15dcc53b1f33..f85dc83ae9423 100644 --- a/pkg/kubectl/cmd/cmd.go +++ b/pkg/kubectl/cmd/cmd.go @@ -380,7 +380,7 @@ func NewKubectlCommand(in io.Reader, out, err io.Writer) *cobra.Command { // a.k.a. change all "_" to "-". e.g. 
glog package flags.SetNormalizeFunc(utilflag.WordSepNormalizeFunc) - kubeConfigFlags := genericclioptions.NewConfigFlags() + kubeConfigFlags := genericclioptions.NewConfigFlags().WithDeprecatedPasswordFlag() kubeConfigFlags.AddFlags(flags) matchVersionKubeConfigFlags := cmdutil.NewMatchVersionFlags(kubeConfigFlags) matchVersionKubeConfigFlags.AddFlags(cmds.PersistentFlags()) From 2e7d0997b3a8af662a7f7b9937ae177ac37e3698 Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Mon, 18 Mar 2019 16:28:45 -0500 Subject: [PATCH 34/96] build/gci: bump CNI version to 0.7.5 --- build/debian-hyperkube-base/Makefile | 2 +- build/rpms/kubeadm.spec | 2 +- build/rpms/kubelet.spec | 2 +- cluster/gce/gci/configure.sh | 4 ++-- test/e2e_node/remote/utils.go | 2 +- 5 files changed, 6 insertions(+), 6 deletions(-) diff --git a/build/debian-hyperkube-base/Makefile b/build/debian-hyperkube-base/Makefile index 4a67da27a09a0..f0f54a08a242d 100644 --- a/build/debian-hyperkube-base/Makefile +++ b/build/debian-hyperkube-base/Makefile @@ -25,7 +25,7 @@ ALL_ARCH = amd64 arm arm64 ppc64le s390x CACHEBUST?=1 BASEIMAGE=k8s.gcr.io/debian-base-$(ARCH):0.3.2 -CNI_VERSION=v0.6.0 +CNI_VERSION=v0.7.5 TEMP_DIR:=$(shell mktemp -d) CNI_TARBALL=cni-plugins-$(ARCH)-$(CNI_VERSION).tgz diff --git a/build/rpms/kubeadm.spec b/build/rpms/kubeadm.spec index b38ecdfb9bcc9..a65eeb231acc4 100644 --- a/build/rpms/kubeadm.spec +++ b/build/rpms/kubeadm.spec @@ -5,7 +5,7 @@ License: ASL 2.0 Summary: Container Cluster Manager - Kubernetes Cluster Bootstrapping Tool Requires: kubelet >= 1.8.0 Requires: kubectl >= 1.8.0 -Requires: kubernetes-cni >= 0.5.1 +Requires: kubernetes-cni >= 0.7.5 Requires: cri-tools >= 1.11.0 URL: https://kubernetes.io diff --git a/build/rpms/kubelet.spec b/build/rpms/kubelet.spec index d5e85bf908279..4cf34a504cba6 100644 --- a/build/rpms/kubelet.spec +++ b/build/rpms/kubelet.spec @@ -7,7 +7,7 @@ Summary: Container Cluster Manager - Kubernetes Node Agent URL: https://kubernetes.io Requires: iptables >= 
1.4.21 -Requires: kubernetes-cni >= 0.5.1 +Requires: kubernetes-cni >= 0.7.5 Requires: socat Requires: util-linux Requires: ethtool diff --git a/cluster/gce/gci/configure.sh b/cluster/gce/gci/configure.sh index 3cf2ba1d5210c..45d9bbd36de7b 100644 --- a/cluster/gce/gci/configure.sh +++ b/cluster/gce/gci/configure.sh @@ -24,8 +24,8 @@ set -o nounset set -o pipefail ### Hardcoded constants -DEFAULT_CNI_VERSION="v0.6.0" -DEFAULT_CNI_SHA1="d595d3ded6499a64e8dac02466e2f5f2ce257c9f" +DEFAULT_CNI_VERSION="v0.7.5" +DEFAULT_CNI_SHA1="52e9d2de8a5f927307d9397308735658ee44ab8d" DEFAULT_NPD_VERSION="v0.6.0" DEFAULT_NPD_SHA1="a28e960a21bb74bc0ae09c267b6a340f30e5b3a6" DEFAULT_CRICTL_VERSION="v1.12.0" diff --git a/test/e2e_node/remote/utils.go b/test/e2e_node/remote/utils.go index 28ab74cf52a1f..1c8a29c479a52 100644 --- a/test/e2e_node/remote/utils.go +++ b/test/e2e_node/remote/utils.go @@ -27,7 +27,7 @@ import ( // utils.go contains functions used across test suites. const ( - cniVersion = "v0.6.0" + cniVersion = "v0.7.5" cniArch = "amd64" cniDirectory = "cni/bin" // The CNI tarball places binaries under directory under "cni/bin". 
cniConfDirectory = "cni/net.d" From 2b4e00412844fed7386dfd200347cacee1248bd2 Mon Sep 17 00:00:00 2001 From: andyzhangx Date: Thu, 14 Mar 2019 14:55:44 +0000 Subject: [PATCH 35/96] fix race condition issue for smb mount on windows change var name --- pkg/util/mount/BUILD | 1 + pkg/util/mount/mount_windows.go | 8 ++++++++ 2 files changed, 9 insertions(+) diff --git a/pkg/util/mount/BUILD b/pkg/util/mount/BUILD index a199f6d32b760..f0e9d1fb2a4e3 100644 --- a/pkg/util/mount/BUILD +++ b/pkg/util/mount/BUILD @@ -58,6 +58,7 @@ go_library( ], "@io_bazel_rules_go//go/platform:windows": [ "//pkg/util/file:go_default_library", + "//pkg/util/keymutex:go_default_library", "//pkg/util/nsenter:go_default_library", ], "//conditions:default": [], diff --git a/pkg/util/mount/mount_windows.go b/pkg/util/mount/mount_windows.go index a5335d7936e4c..e023a1ce6d350 100644 --- a/pkg/util/mount/mount_windows.go +++ b/pkg/util/mount/mount_windows.go @@ -29,6 +29,7 @@ import ( "syscall" "github.com/golang/glog" + "k8s.io/kubernetes/pkg/util/keymutex" utilfile "k8s.io/kubernetes/pkg/util/file" ) @@ -49,6 +50,9 @@ func New(mounterPath string) Interface { } } +// acquire lock for smb mount +var getSMBMountMutex = keymutex.NewHashed(0) + // Mount : mounts source to target with given options. 
// currently only supports cifs(smb), bind mount(for disk) func (mounter *Mounter) Mount(source string, target string, fstype string, options []string) error { @@ -84,6 +88,10 @@ func (mounter *Mounter) Mount(source string, target string, fstype string, optio return fmt.Errorf("only cifs mount is supported now, fstype: %q, mounting source (%q), target (%q), with options (%q)", fstype, source, target, options) } + // lock smb mount for the same source + getSMBMountMutex.LockKey(source) + defer getSMBMountMutex.UnlockKey(source) + if output, err := newSMBMapping(options[0], options[1], source); err != nil { if isSMBMappingExist(source) { glog.V(2).Infof("SMB Mapping(%s) already exists, now begin to remove and remount", source) From 116988017d9e20ccfef6ec22be2e15ce2f2f5abc Mon Sep 17 00:00:00 2001 From: Zhen Wang Date: Thu, 7 Feb 2019 15:40:31 -0800 Subject: [PATCH 36/96] allows configuring NPD release and flags on GCI and add cluster e2e test --- cluster/gce/config-default.sh | 2 + cluster/gce/config-test.sh | 2 + cluster/gce/gci/configure-helper.sh | 30 +-- cluster/gce/gci/configure.sh | 6 +- cluster/gce/util.sh | 2 + test/e2e/framework/kubelet_stats.go | 6 +- test/e2e/node/BUILD | 2 + test/e2e/node/node_problem_detector.go | 282 +++++++++++++++++++++++++ 8 files changed, 313 insertions(+), 19 deletions(-) create mode 100644 test/e2e/node/node_problem_detector.go diff --git a/cluster/gce/config-default.sh b/cluster/gce/config-default.sh index 6da0ebacaaa31..e65f3bf298179 100755 --- a/cluster/gce/config-default.sh +++ b/cluster/gce/config-default.sh @@ -285,6 +285,8 @@ else fi NODE_PROBLEM_DETECTOR_VERSION="${NODE_PROBLEM_DETECTOR_VERSION:-}" NODE_PROBLEM_DETECTOR_TAR_HASH="${NODE_PROBLEM_DETECTOR_TAR_HASH:-}" +NODE_PROBLEM_DETECTOR_RELEASE_PATH="${NODE_PROBLEM_DETECTOR_RELEASE_PATH:-}" +NODE_PROBLEM_DETECTOR_CUSTOM_FLAGS="${NODE_PROBLEM_DETECTOR_CUSTOM_FLAGS:-}" # Optional: Create autoscaler for cluster's nodes. 
ENABLE_CLUSTER_AUTOSCALER="${KUBE_ENABLE_CLUSTER_AUTOSCALER:-false}" diff --git a/cluster/gce/config-test.sh b/cluster/gce/config-test.sh index 5634aa3ff7e14..5d69e19f9f38c 100755 --- a/cluster/gce/config-test.sh +++ b/cluster/gce/config-test.sh @@ -292,6 +292,8 @@ else fi NODE_PROBLEM_DETECTOR_VERSION="${NODE_PROBLEM_DETECTOR_VERSION:-}" NODE_PROBLEM_DETECTOR_TAR_HASH="${NODE_PROBLEM_DETECTOR_TAR_HASH:-}" +NODE_PROBLEM_DETECTOR_RELEASE_PATH="${NODE_PROBLEM_DETECTOR_RELEASE_PATH:-}" +NODE_PROBLEM_DETECTOR_CUSTOM_FLAGS="${NODE_PROBLEM_DETECTOR_CUSTOM_FLAGS:-}" # Optional: Create autoscaler for cluster's nodes. ENABLE_CLUSTER_AUTOSCALER="${KUBE_ENABLE_CLUSTER_AUTOSCALER:-false}" diff --git a/cluster/gce/gci/configure-helper.sh b/cluster/gce/gci/configure-helper.sh index 59daa7fc67292..c8100b28671f8 100644 --- a/cluster/gce/gci/configure-helper.sh +++ b/cluster/gce/gci/configure-helper.sh @@ -1248,21 +1248,25 @@ EOF function start-node-problem-detector { echo "Start node problem detector" local -r npd_bin="${KUBE_HOME}/bin/node-problem-detector" - local -r km_config="${KUBE_HOME}/node-problem-detector/config/kernel-monitor.json" - # TODO(random-liu): Handle this for alternative container runtime. 
- local -r dm_config="${KUBE_HOME}/node-problem-detector/config/docker-monitor.json" - local -r custom_km_config="${KUBE_HOME}/node-problem-detector/config/kernel-monitor-counter.json,${KUBE_HOME}/node-problem-detector/config/systemd-monitor-counter.json,${KUBE_HOME}/node-problem-detector/config/docker-monitor-counter.json" echo "Using node problem detector binary at ${npd_bin}" - local flags="${NPD_TEST_LOG_LEVEL:-"--v=2"} ${NPD_TEST_ARGS:-}" - flags+=" --logtostderr" - flags+=" --system-log-monitors=${km_config},${dm_config}" - flags+=" --custom-plugin-monitors=${custom_km_config}" - flags+=" --apiserver-override=https://${KUBERNETES_MASTER_NAME}?inClusterConfig=false&auth=/var/lib/node-problem-detector/kubeconfig" - local -r npd_port=${NODE_PROBLEM_DETECTOR_PORT:-20256} - flags+=" --port=${npd_port}" - if [[ -n "${EXTRA_NPD_ARGS:-}" ]]; then - flags+=" ${EXTRA_NPD_ARGS}" + + local flags="${NODE_PROBLEM_DETECTOR_CUSTOM_FLAGS:-}" + if [[ -z "${flags}" ]]; then + local -r km_config="${KUBE_HOME}/node-problem-detector/config/kernel-monitor.json" + # TODO(random-liu): Handle this for alternative container runtime. 
+ local -r dm_config="${KUBE_HOME}/node-problem-detector/config/docker-monitor.json" + local -r custom_km_config="${KUBE_HOME}/node-problem-detector/config/kernel-monitor-counter.json,${KUBE_HOME}/node-problem-detector/config/systemd-monitor-counter.json,${KUBE_HOME}/node-problem-detector/config/docker-monitor-counter.json" + flags="${NPD_TEST_LOG_LEVEL:-"--v=2"} ${NPD_TEST_ARGS:-}" + flags+=" --logtostderr" + flags+=" --system-log-monitors=${km_config},${dm_config}" + flags+=" --custom-plugin-monitors=${custom_km_config}" + local -r npd_port=${NODE_PROBLEM_DETECTOR_PORT:-20256} + flags+=" --port=${npd_port}" + if [[ -n "${EXTRA_NPD_ARGS:-}" ]]; then + flags+=" ${EXTRA_NPD_ARGS}" + fi fi + flags+=" --apiserver-override=https://${KUBERNETES_MASTER_NAME}?inClusterConfig=false&auth=/var/lib/node-problem-detector/kubeconfig" # Write the systemd service file for node problem detector. cat </etc/systemd/system/node-problem-detector.service diff --git a/cluster/gce/gci/configure.sh b/cluster/gce/gci/configure.sh index 3cf2ba1d5210c..803fa1e5f9911 100644 --- a/cluster/gce/gci/configure.sh +++ b/cluster/gce/gci/configure.sh @@ -202,12 +202,12 @@ function install-node-problem-detector { local -r npd_tar="node-problem-detector-${npd_version}.tar.gz" if is-preloaded "${npd_tar}" "${npd_sha1}"; then - echo "node-problem-detector is preloaded." + echo "${npd_tar} is preloaded." return fi - echo "Downloading node problem detector." - local -r npd_release_path="https://storage.googleapis.com/kubernetes-release" + echo "Downloading ${npd_tar}." 
+ local -r npd_release_path="${NODE_PROBLEM_DETECTOR_RELEASE_PATH:-https://storage.googleapis.com/kubernetes-release}" download-or-bust "${npd_sha1}" "${npd_release_path}/node-problem-detector/${npd_tar}" local -r npd_dir="${KUBE_HOME}/node-problem-detector" mkdir -p "${npd_dir}" diff --git a/cluster/gce/util.sh b/cluster/gce/util.sh index fdb7c548d95ca..ecdf70e717895 100755 --- a/cluster/gce/util.sh +++ b/cluster/gce/util.sh @@ -843,6 +843,8 @@ ENABLE_CLUSTER_UI: $(yaml-quote ${ENABLE_CLUSTER_UI:-false}) ENABLE_NODE_PROBLEM_DETECTOR: $(yaml-quote ${ENABLE_NODE_PROBLEM_DETECTOR:-none}) NODE_PROBLEM_DETECTOR_VERSION: $(yaml-quote ${NODE_PROBLEM_DETECTOR_VERSION:-}) NODE_PROBLEM_DETECTOR_TAR_HASH: $(yaml-quote ${NODE_PROBLEM_DETECTOR_TAR_HASH:-}) +NODE_PROBLEM_DETECTOR_RELEASE_PATH: $(yaml-quote ${NODE_PROBLEM_DETECTOR_RELEASE_PATH:-}) +NODE_PROBLEM_DETECTOR_CUSTOM_FLAGS: $(yaml-quote ${NODE_PROBLEM_DETECTOR_CUSTOM_FLAGS:-}) ENABLE_NODE_LOGGING: $(yaml-quote ${ENABLE_NODE_LOGGING:-false}) LOGGING_DESTINATION: $(yaml-quote ${LOGGING_DESTINATION:-}) ELASTICSEARCH_LOGGING_REPLICAS: $(yaml-quote ${ELASTICSEARCH_LOGGING_REPLICAS:-}) diff --git a/test/e2e/framework/kubelet_stats.go b/test/e2e/framework/kubelet_stats.go index 3803f088210e6..8f3803f38d0ac 100644 --- a/test/e2e/framework/kubelet_stats.go +++ b/test/e2e/framework/kubelet_stats.go @@ -281,8 +281,8 @@ func HighLatencyKubeletOperations(c clientset.Interface, threshold time.Duration return badMetrics, nil } -// getStatsSummary contacts kubelet for the container information. -func getStatsSummary(c clientset.Interface, nodeName string) (*stats.Summary, error) { +// GetStatsSummary contacts kubelet for the container information. 
+func GetStatsSummary(c clientset.Interface, nodeName string) (*stats.Summary, error) { ctx, cancel := context.WithTimeout(context.Background(), SingleCallTimeout) defer cancel() @@ -348,7 +348,7 @@ func getOneTimeResourceUsageOnNode( return nil, fmt.Errorf("numStats needs to be > 1 and < %d", maxNumStatsToRequest) } // Get information of all containers on the node. - summary, err := getStatsSummary(c, nodeName) + summary, err := GetStatsSummary(c, nodeName) if err != nil { return nil, err } diff --git a/test/e2e/node/BUILD b/test/e2e/node/BUILD index fc4181f76d28f..9ce4e43a5a3ac 100644 --- a/test/e2e/node/BUILD +++ b/test/e2e/node/BUILD @@ -10,6 +10,7 @@ go_library( "kubelet.go", "kubelet_perf.go", "mount_propagation.go", + "node_problem_detector.go", "pod_gc.go", "pods.go", "pre_stop.go", @@ -19,6 +20,7 @@ go_library( importpath = "k8s.io/kubernetes/test/e2e/node", visibility = ["//visibility:public"], deps = [ + "//pkg/api/v1/node:go_default_library", "//pkg/kubelet/apis/stats/v1alpha1:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library", diff --git a/test/e2e/node/node_problem_detector.go b/test/e2e/node/node_problem_detector.go new file mode 100644 index 0000000000000..3f3289421be27 --- /dev/null +++ b/test/e2e/node/node_problem_detector.go @@ -0,0 +1,282 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package node + +import ( + "fmt" + "net" + "sort" + "strconv" + "strings" + "time" + + "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/fields" + nodeutil "k8s.io/kubernetes/pkg/api/v1/node" + "k8s.io/kubernetes/test/e2e/framework" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" +) + +// This test checks if node-problem-detector (NPD) runs fine without error on +// the nodes in the cluster. NPD's functionality is tested in e2e_node tests. +var _ = SIGDescribe("NodeProblemDetector", func() { + const ( + pollInterval = 1 * time.Second + pollTimeout = 1 * time.Minute + ) + f := framework.NewDefaultFramework("node-problem-detector") + + BeforeEach(func() { + framework.SkipUnlessSSHKeyPresent() + framework.SkipUnlessProviderIs(framework.ProvidersWithSSH...) + framework.SkipUnlessProviderIs("gce", "gke") + framework.SkipUnlessNodeOSDistroIs("gci", "ubuntu") + framework.WaitForAllNodesHealthy(f.ClientSet, time.Minute) + }) + + It("should run without error", func() { + By("Getting all nodes and their SSH-able IP addresses") + nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet) + Expect(len(nodes.Items)).NotTo(BeZero()) + hosts := []string{} + for _, node := range nodes.Items { + for _, addr := range node.Status.Addresses { + if addr.Type == v1.NodeExternalIP { + hosts = append(hosts, net.JoinHostPort(addr.Address, "22")) + break + } + } + } + Expect(len(hosts)).To(Equal(len(nodes.Items))) + + isStandaloneMode := make(map[string]bool) + cpuUsageStats := make(map[string][]float64) + uptimeStats := make(map[string][]float64) + rssStats := make(map[string][]float64) + workingSetStats := make(map[string][]float64) + + for _, host := range hosts { + cpuUsageStats[host] = []float64{} + uptimeStats[host] = []float64{} + rssStats[host] = []float64{} + workingSetStats[host] = []float64{} + + cmd := "systemctl status node-problem-detector.service" + result, err := framework.SSH(cmd, host, 
framework.TestContext.Provider) + isStandaloneMode[host] = (err == nil && result.Code == 0) + + By(fmt.Sprintf("Check node %q has node-problem-detector process", host)) + // Using brackets "[n]" is a trick to prevent grep command itself from + // showing up, because string text "[n]ode-problem-detector" does not + // match regular expression "[n]ode-problem-detector". + psCmd := "ps aux | grep [n]ode-problem-detector" + result, err = framework.SSH(psCmd, host, framework.TestContext.Provider) + framework.ExpectNoError(err) + Expect(result.Code).To(BeZero()) + Expect(result.Stdout).To(ContainSubstring("node-problem-detector")) + + By(fmt.Sprintf("Check node-problem-detector is running fine on node %q", host)) + journalctlCmd := "sudo journalctl -u node-problem-detector" + result, err = framework.SSH(journalctlCmd, host, framework.TestContext.Provider) + framework.ExpectNoError(err) + Expect(result.Code).To(BeZero()) + Expect(result.Stdout).NotTo(ContainSubstring("node-problem-detector.service: Failed")) + + if isStandaloneMode[host] { + cpuUsage, uptime := getCpuStat(f, host) + cpuUsageStats[host] = append(cpuUsageStats[host], cpuUsage) + uptimeStats[host] = append(uptimeStats[host], uptime) + } + + By(fmt.Sprintf("Inject log to trigger AUFSUmountHung on node %q", host)) + log := "INFO: task umount.aufs:21568 blocked for more than 120 seconds." 
+ injectLogCmd := "sudo sh -c \"echo 'kernel: " + log + "' >> /dev/kmsg\"" + _, err = framework.SSH(injectLogCmd, host, framework.TestContext.Provider) + framework.ExpectNoError(err) + Expect(result.Code).To(BeZero()) + } + + By("Check node-problem-detector can post conditions and events to API server") + for _, node := range nodes.Items { + By(fmt.Sprintf("Check node-problem-detector posted KernelDeadlock condition on node %q", node.Name)) + Eventually(func() error { + return verifyNodeCondition(f, "KernelDeadlock", v1.ConditionTrue, "AUFSUmountHung", node.Name) + }, pollTimeout, pollInterval).Should(Succeed()) + + By(fmt.Sprintf("Check node-problem-detector posted AUFSUmountHung event on node %q", node.Name)) + eventListOptions := metav1.ListOptions{FieldSelector: fields.Set{"involvedObject.kind": "Node"}.AsSelector().String()} + Eventually(func() error { + return verifyEvents(f, eventListOptions, 1, "AUFSUmountHung", node.Name) + }, pollTimeout, pollInterval).Should(Succeed()) + } + + By("Gather node-problem-detector cpu and memory stats") + numIterations := 60 + for i := 1; i <= numIterations; i++ { + for j, host := range hosts { + if isStandaloneMode[host] { + rss, workingSet := getMemoryStat(f, host) + rssStats[host] = append(rssStats[host], rss) + workingSetStats[host] = append(workingSetStats[host], workingSet) + if i == numIterations { + cpuUsage, uptime := getCpuStat(f, host) + cpuUsageStats[host] = append(cpuUsageStats[host], cpuUsage) + uptimeStats[host] = append(uptimeStats[host], uptime) + } + } else { + cpuUsage, rss, workingSet := getNpdPodStat(f, nodes.Items[j].Name) + cpuUsageStats[host] = append(cpuUsageStats[host], cpuUsage) + rssStats[host] = append(rssStats[host], rss) + workingSetStats[host] = append(workingSetStats[host], workingSet) + } + } + time.Sleep(time.Second) + } + + cpuStatsMsg := "CPU (core):" + rssStatsMsg := "RSS (MB):" + workingSetStatsMsg := "WorkingSet (MB):" + for i, host := range hosts { + if isStandaloneMode[host] { + // 
When in standalone mode, NPD is running as systemd service. We + // calculate its cpu usage from cgroup cpuacct value differences. + cpuUsage := cpuUsageStats[host][1] - cpuUsageStats[host][0] + totaltime := uptimeStats[host][1] - uptimeStats[host][0] + cpuStatsMsg += fmt.Sprintf(" %s[%.3f];", nodes.Items[i].Name, cpuUsage/totaltime) + } else { + sort.Float64s(cpuUsageStats[host]) + cpuStatsMsg += fmt.Sprintf(" %s[%.3f|%.3f|%.3f];", nodes.Items[i].Name, + cpuUsageStats[host][0], cpuUsageStats[host][len(cpuUsageStats[host])/2], cpuUsageStats[host][len(cpuUsageStats[host])-1]) + } + + sort.Float64s(rssStats[host]) + rssStatsMsg += fmt.Sprintf(" %s[%.1f|%.1f|%.1f];", nodes.Items[i].Name, + rssStats[host][0], rssStats[host][len(rssStats[host])/2], rssStats[host][len(rssStats[host])-1]) + + sort.Float64s(workingSetStats[host]) + workingSetStatsMsg += fmt.Sprintf(" %s[%.1f|%.1f|%.1f];", nodes.Items[i].Name, + workingSetStats[host][0], workingSetStats[host][len(workingSetStats[host])/2], workingSetStats[host][len(workingSetStats[host])-1]) + } + framework.Logf("Node-Problem-Detector CPU and Memory Stats:\n\t%s\n\t%s\n\t%s", cpuStatsMsg, rssStatsMsg, workingSetStatsMsg) + }) +}) + +func verifyEvents(f *framework.Framework, options metav1.ListOptions, num int, reason, nodeName string) error { + events, err := f.ClientSet.CoreV1().Events(metav1.NamespaceDefault).List(options) + if err != nil { + return err + } + count := 0 + for _, event := range events.Items { + if event.Reason != reason || event.Source.Host != nodeName { + continue + } + count += int(event.Count) + } + if count != num { + return fmt.Errorf("expect event number %d, got %d: %v", num, count, events.Items) + } + return nil +} + +func verifyNodeCondition(f *framework.Framework, condition v1.NodeConditionType, status v1.ConditionStatus, reason, nodeName string) error { + node, err := f.ClientSet.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{}) + if err != nil { + return err + } + _, c := 
nodeutil.GetNodeCondition(&node.Status, condition) + if c == nil { + return fmt.Errorf("node condition %q not found", condition) + } + if c.Status != status || c.Reason != reason { + return fmt.Errorf("unexpected node condition %q: %+v", condition, c) + } + return nil +} + +func getMemoryStat(f *framework.Framework, host string) (rss, workingSet float64) { + memCmd := "cat /sys/fs/cgroup/memory/system.slice/node-problem-detector.service/memory.usage_in_bytes && cat /sys/fs/cgroup/memory/system.slice/node-problem-detector.service/memory.stat" + result, err := framework.SSH(memCmd, host, framework.TestContext.Provider) + framework.ExpectNoError(err) + Expect(result.Code).To(BeZero()) + lines := strings.Split(result.Stdout, "\n") + + memoryUsage, err := strconv.ParseFloat(lines[0], 64) + Expect(err).To(BeNil()) + + var totalInactiveFile float64 + for _, line := range lines[1:] { + tokens := strings.Split(line, " ") + if tokens[0] == "total_rss" { + rss, err = strconv.ParseFloat(tokens[1], 64) + Expect(err).To(BeNil()) + } + if tokens[0] == "total_inactive_file" { + totalInactiveFile, err = strconv.ParseFloat(tokens[1], 64) + Expect(err).To(BeNil()) + } + } + + workingSet = memoryUsage + if workingSet < totalInactiveFile { + workingSet = 0 + } else { + workingSet -= totalInactiveFile + } + + // Convert to MB + rss = rss / 1024 / 1024 + workingSet = workingSet / 1024 / 1024 + return +} + +func getCpuStat(f *framework.Framework, host string) (usage, uptime float64) { + cpuCmd := "cat /sys/fs/cgroup/cpu/system.slice/node-problem-detector.service/cpuacct.usage && cat /proc/uptime | awk '{print $1}'" + result, err := framework.SSH(cpuCmd, host, framework.TestContext.Provider) + framework.ExpectNoError(err) + Expect(result.Code).To(BeZero()) + lines := strings.Split(result.Stdout, "\n") + + usage, err = strconv.ParseFloat(lines[0], 64) + uptime, err = strconv.ParseFloat(lines[1], 64) + + // Convert from nanoseconds to seconds + usage *= 1e-9 + return +} + +func 
getNpdPodStat(f *framework.Framework, nodeName string) (cpuUsage, rss, workingSet float64) { + summary, err := framework.GetStatsSummary(f.ClientSet, nodeName) + framework.ExpectNoError(err) + + hasNpdPod := false + for _, pod := range summary.Pods { + if !strings.HasPrefix(pod.PodRef.Name, "npd") { + continue + } + cpuUsage = float64(*pod.CPU.UsageNanoCores) * 1e-9 + rss = float64(*pod.Memory.RSSBytes) / 1024 / 1024 + workingSet = float64(*pod.Memory.WorkingSetBytes) / 1024 / 1024 + hasNpdPod = true + break + } + Expect(hasNpdPod).To(BeTrue()) + return +} From 088d270522c6da6f725e540d2240a8451d6cfefd Mon Sep 17 00:00:00 2001 From: Zhen Wang Date: Thu, 7 Feb 2019 15:41:16 -0800 Subject: [PATCH 37/96] allows configuring NPD image version in node e2e test and fix the test --- hack/make-rules/test-e2e-node.sh | 7 +-- test/e2e/framework/test_context.go | 3 ++ test/e2e_node/conformance/build/Dockerfile | 5 +- test/e2e_node/e2e_node_suite_test.go | 23 ++++++++- test/e2e_node/image_list.go | 19 ++++++- .../conformance/conformance-jenkins.sh | 3 +- test/e2e_node/jenkins/e2e-node-jenkins.sh | 3 +- test/e2e_node/node_problem_detector_linux.go | 51 ++++++++++++------- test/e2e_node/remote/cadvisor_e2e.go | 2 +- test/e2e_node/remote/node_conformance.go | 6 +-- test/e2e_node/remote/node_e2e.go | 6 +-- test/e2e_node/remote/remote.go | 4 +- test/e2e_node/remote/types.go | 3 +- test/e2e_node/runner/local/run_local.go | 3 +- test/e2e_node/runner/remote/run_remote.go | 3 +- 15 files changed, 103 insertions(+), 38 deletions(-) diff --git a/hack/make-rules/test-e2e-node.sh b/hack/make-rules/test-e2e-node.sh index 2e5c95ae2646b..451486ca39c81 100755 --- a/hack/make-rules/test-e2e-node.sh +++ b/hack/make-rules/test-e2e-node.sh @@ -34,6 +34,7 @@ image_service_endpoint=${IMAGE_SERVICE_ENDPOINT:-""} run_until_failure=${RUN_UNTIL_FAILURE:-"false"} test_args=${TEST_ARGS:-""} system_spec_name=${SYSTEM_SPEC_NAME:-} +extra_envs=${EXTRA_ENVS:-} # Parse the flags to pass to ginkgo ginkgoflags="" 
@@ -148,7 +149,7 @@ if [ $remote = true ] ; then --image-project="$image_project" --instance-name-prefix="$instance_prefix" \ --delete-instances="$delete_instances" --test_args="$test_args" --instance-metadata="$metadata" \ --image-config-file="$image_config_file" --system-spec-name="$system_spec_name" \ - --test-suite="$test_suite" \ + --extra-envs="$extra_envs" --test-suite="$test_suite" \ 2>&1 | tee -i "${artifacts}/build-log.txt" exit $? @@ -169,8 +170,8 @@ else # Test using the host the script was run on # Provided for backwards compatibility go run test/e2e_node/runner/local/run_local.go \ - --system-spec-name="$system_spec_name" --ginkgo-flags="$ginkgoflags" \ - --test-flags="--container-runtime=${runtime} \ + --system-spec-name="$system_spec_name" --extra-envs="$extra_envs" \ + --ginkgo-flags="$ginkgoflags" --test-flags="--container-runtime=${runtime} \ --alsologtostderr --v 4 --report-dir=${artifacts} --node-name $(hostname) \ $test_args" --build-dependencies=true 2>&1 | tee -i "${artifacts}/build-log.txt" exit $? diff --git a/test/e2e/framework/test_context.go b/test/e2e/framework/test_context.go index a8c3c04c21d8d..0f2d940848e37 100644 --- a/test/e2e/framework/test_context.go +++ b/test/e2e/framework/test_context.go @@ -162,6 +162,8 @@ type NodeTestContextType struct { // the node e2e test. If empty, the default one (system.DefaultSpec) is // used. The system specs are in test/e2e_node/system/specs/. SystemSpecName string + // ExtraEnvs is a map of environment names to values. + ExtraEnvs map[string]string } // StorageConfig contains the shared settings for storage 2e2 tests. 
@@ -304,6 +306,7 @@ func RegisterNodeFlags() { flag.BoolVar(&TestContext.PrepullImages, "prepull-images", true, "If true, prepull images so image pull failures do not cause test failures.") flag.StringVar(&TestContext.ImageDescription, "image-description", "", "The description of the image which the test will be running on.") flag.StringVar(&TestContext.SystemSpecName, "system-spec-name", "", "The name of the system spec (e.g., gke) that's used in the node e2e test. The system specs are in test/e2e_node/system/specs/. This is used by the test framework to determine which tests to run for validating the system requirements.") + flag.Var(utilflag.NewMapStringString(&TestContext.ExtraEnvs), "extra-envs", "The extra environment variables needed for node e2e tests. Format: a list of key=value pairs, e.g., env1=val1,env2=val2") } func RegisterStorageFlags() { diff --git a/test/e2e_node/conformance/build/Dockerfile b/test/e2e_node/conformance/build/Dockerfile index 5783726a08b36..288649683f209 100644 --- a/test/e2e_node/conformance/build/Dockerfile +++ b/test/e2e_node/conformance/build/Dockerfile @@ -27,12 +27,14 @@ COPY_SYSTEM_SPEC_FILE # REPORT_PATH is the path in the container to save test result and logs. # FLAKE_ATTEMPTS is the time to retry when there is a test failure. By default 2. # TEST_ARGS is the test arguments passed into the test. +# EXTRA_ENVS is the extra environment variables needed for node e2e tests. ENV FOCUS="\[Conformance\]" \ SKIP="\[Flaky\]|\[Serial\]" \ PARALLELISM=8 \ REPORT_PATH="/var/result" \ FLAKE_ATTEMPTS=2 \ - TEST_ARGS="" + TEST_ARGS="" \ + EXTRA_ENVS="" ENTRYPOINT ginkgo --focus="$FOCUS" \ --skip="$SKIP" \ @@ -46,4 +48,5 @@ ENTRYPOINT ginkgo --focus="$FOCUS" \ --system-spec-name=SYSTEM_SPEC_NAME \ # This is a placeholder that will be substituted in the Makefile. 
--system-spec-file=SYSTEM_SPEC_FILE_PATH \ + --extra-envs=$EXTRA_ENVS \ $TEST_ARGS diff --git a/test/e2e_node/e2e_node_suite_test.go b/test/e2e_node/e2e_node_suite_test.go index 91f3586ad4c23..eb6a46ff97bf9 100644 --- a/test/e2e_node/e2e_node_suite_test.go +++ b/test/e2e_node/e2e_node_suite_test.go @@ -76,6 +76,7 @@ func init() { func TestMain(m *testing.M) { pflag.Parse() framework.AfterReadingAllFlags(&framework.TestContext) + setExtraEnvs() os.Exit(m.Run()) } @@ -146,6 +147,7 @@ var _ = SynchronizedBeforeSuite(func() []byte { // This helps with debugging test flakes since it is hard to tell when a test failure is due to image pulling. if framework.TestContext.PrepullImages { glog.Infof("Pre-pulling images so that they are cached for the tests.") + updateImageWhiteList() err := PrePullAllImages() Expect(err).ShouldNot(HaveOccurred()) } @@ -244,6 +246,9 @@ func waitForNodeReady() { // TODO(random-liu): Using dynamic kubelet configuration feature to // update test context with node configuration. func updateTestContext() error { + setExtraEnvs() + updateImageWhiteList() + client, err := getAPIServerClient() if err != nil { return fmt.Errorf("failed to get apiserver client: %v", err) @@ -261,7 +266,7 @@ func updateTestContext() error { if err != nil { return fmt.Errorf("failed to get kubelet configuration: %v", err) } - framework.TestContext.KubeletConfig = *kubeletCfg // Set kubelet config. + framework.TestContext.KubeletConfig = *kubeletCfg // Set kubelet config return nil } @@ -309,3 +314,19 @@ func loadSystemSpecFromFile(filename string) (*system.SysSpec, error) { } return spec, nil } + +// isNodeReady returns true if a node is ready; false otherwise. 
+func isNodeReady(node *v1.Node) bool { + for _, c := range node.Status.Conditions { + if c.Type == v1.NodeReady { + return c.Status == v1.ConditionTrue + } + } + return false +} + +func setExtraEnvs() { + for name, value := range framework.TestContext.ExtraEnvs { + os.Setenv(name, value) + } +} diff --git a/test/e2e_node/image_list.go b/test/e2e_node/image_list.go index e7bc3f9baee8a..5a40e2eacf826 100644 --- a/test/e2e_node/image_list.go +++ b/test/e2e_node/image_list.go @@ -18,6 +18,7 @@ package e2e_node import ( "fmt" + "os" "os/exec" "os/user" "time" @@ -46,7 +47,6 @@ var NodeImageWhiteList = sets.NewString( "k8s.gcr.io/stress:v1", busyboxImage, "k8s.gcr.io/busybox@sha256:4bdd623e848417d96127e16037743f0cd8b528c026e9175e22a84f639eca58ff", - "k8s.gcr.io/node-problem-detector:v0.4.1", imageutils.GetE2EImage(imageutils.Nginx), imageutils.GetE2EImage(imageutils.ServeHostname), imageutils.GetE2EImage(imageutils.Netexec), @@ -58,9 +58,24 @@ var NodeImageWhiteList = sets.NewString( "gcr.io/kubernetes-e2e-test-images/node-perf/tf-wide-deep-amd64:1.0", ) -func init() { +// updateImageWhiteList updates the framework.ImageWhiteList with +// 1. the hard coded lists +// 2. the ones passed in from framework.TestContext.ExtraEnvs +// So this function needs to be called after the extra envs are applied. +func updateImageWhiteList() { // Union NodeImageWhiteList and CommonImageWhiteList into the framework image white list. 
framework.ImageWhiteList = NodeImageWhiteList.Union(commontest.CommonImageWhiteList) + // Images from extra envs + framework.ImageWhiteList.Insert(getNodeProblemDetectorImage()) +} + +func getNodeProblemDetectorImage() string { + const defaultImage string = "k8s.gcr.io/node-problem-detector:v0.6.2" + image := os.Getenv("NODE_PROBLEM_DETECTOR_IMAGE") + if image == "" { + image = defaultImage + } + return image } // puller represents a generic image puller diff --git a/test/e2e_node/jenkins/conformance/conformance-jenkins.sh b/test/e2e_node/jenkins/conformance/conformance-jenkins.sh index 9e8715287cf1d..7758d0b2df664 100755 --- a/test/e2e_node/jenkins/conformance/conformance-jenkins.sh +++ b/test/e2e_node/jenkins/conformance/conformance-jenkins.sh @@ -40,4 +40,5 @@ go run test/e2e_node/runner/remote/run_remote.go --test-suite=conformance \ --results-dir="$ARTIFACTS" --test-timeout="$TIMEOUT" \ --test_args="--kubelet-flags=\"$KUBELET_ARGS\"" \ --instance-metadata="$GCE_INSTANCE_METADATA" \ - --system-spec-name="$SYSTEM_SPEC_NAME" + --system-spec-name="$SYSTEM_SPEC_NAME" \ + --extra-envs="$EXTRA_ENVS" diff --git a/test/e2e_node/jenkins/e2e-node-jenkins.sh b/test/e2e_node/jenkins/e2e-node-jenkins.sh index a1caae4ad95f4..99a4ac14bc388 100755 --- a/test/e2e_node/jenkins/e2e-node-jenkins.sh +++ b/test/e2e_node/jenkins/e2e-node-jenkins.sh @@ -47,4 +47,5 @@ go run test/e2e_node/runner/remote/run_remote.go --logtostderr --vmodule=*=4 \ --image-config-file="$GCE_IMAGE_CONFIG_PATH" --cleanup="$CLEANUP" \ --results-dir="$ARTIFACTS" --ginkgo-flags="--nodes=$PARALLELISM $GINKGO_FLAGS" \ --test-timeout="$TIMEOUT" --test_args="$TEST_ARGS --kubelet-flags=\"$KUBELET_ARGS\"" \ - --instance-metadata="$GCE_INSTANCE_METADATA" --system-spec-name="$SYSTEM_SPEC_NAME" + --instance-metadata="$GCE_INSTANCE_METADATA" --system-spec-name="$SYSTEM_SPEC_NAME" \ + --extra-envs="$EXTRA_ENVS" diff --git a/test/e2e_node/node_problem_detector_linux.go b/test/e2e_node/node_problem_detector_linux.go index 
36a63193c9e28..a6a248abcf972 100644 --- a/test/e2e_node/node_problem_detector_linux.go +++ b/test/e2e_node/node_problem_detector_linux.go @@ -45,13 +45,14 @@ var _ = framework.KubeDescribe("NodeProblemDetector [NodeFeature:NodeProblemDete pollInterval = 1 * time.Second pollConsistent = 5 * time.Second pollTimeout = 1 * time.Minute - image = "k8s.gcr.io/node-problem-detector:v0.4.1" ) f := framework.NewDefaultFramework("node-problem-detector") var c clientset.Interface var uid string var ns, name, configName, eventNamespace string var bootTime, nodeTime time.Time + var image string + BeforeEach(func() { c = f.ClientSet ns = f.Namespace.Name @@ -60,6 +61,8 @@ var _ = framework.KubeDescribe("NodeProblemDetector [NodeFeature:NodeProblemDete configName = "node-problem-detector-config-" + uid // There is no namespace for Node, event recorder will set default namespace for node events. eventNamespace = metav1.NamespaceDefault + image = getNodeProblemDetectorImage() + By(fmt.Sprintf("Using node-problem-detector image: %s", image)) }) // Test system log monitor. We may add other tests if we have more problem daemons in the future. 
@@ -245,7 +248,8 @@ var _ = framework.KubeDescribe("NodeProblemDetector [NodeFeature:NodeProblemDete timestamp time.Time message string messageNum int - events int + tempEvents int // Events for temp errors + totalEvents int // Events for both temp errors and condition changes conditionReason string conditionMessage string conditionType v1.ConditionStatus @@ -279,7 +283,8 @@ var _ = framework.KubeDescribe("NodeProblemDetector [NodeFeature:NodeProblemDete timestamp: nodeTime, message: tempMessage, messageNum: 3, - events: 3, + tempEvents: 3, + totalEvents: 3, conditionReason: defaultReason, conditionMessage: defaultMessage, conditionType: v1.ConditionFalse, @@ -289,7 +294,8 @@ var _ = framework.KubeDescribe("NodeProblemDetector [NodeFeature:NodeProblemDete timestamp: nodeTime, message: permMessage1, messageNum: 1, - events: 3, // event number should not change + tempEvents: 3, // event number for temp errors should not change + totalEvents: 4, // add 1 event for condition change conditionReason: permReason1, conditionMessage: permMessage1, conditionType: v1.ConditionTrue, @@ -299,7 +305,8 @@ var _ = framework.KubeDescribe("NodeProblemDetector [NodeFeature:NodeProblemDete timestamp: nodeTime.Add(5 * time.Minute), message: tempMessage, messageNum: 3, - events: 6, + tempEvents: 6, // add 3 events for temp errors + totalEvents: 7, // add 3 events for temp errors conditionReason: permReason1, conditionMessage: permMessage1, conditionType: v1.ConditionTrue, @@ -309,7 +316,8 @@ var _ = framework.KubeDescribe("NodeProblemDetector [NodeFeature:NodeProblemDete timestamp: nodeTime.Add(5 * time.Minute), message: permMessage1 + "different message", messageNum: 1, - events: 6, // event number should not change + tempEvents: 6, // event number should not change + totalEvents: 7, // event number should not change conditionReason: permReason1, conditionMessage: permMessage1, conditionType: v1.ConditionTrue, @@ -319,7 +327,8 @@ var _ = framework.KubeDescribe("NodeProblemDetector 
[NodeFeature:NodeProblemDete timestamp: nodeTime.Add(5 * time.Minute), message: permMessage2, messageNum: 1, - events: 6, // event number should not change + tempEvents: 6, // event number for temp errors should not change + totalEvents: 8, // add 1 event for condition change conditionReason: permReason2, conditionMessage: permMessage2, conditionType: v1.ConditionTrue, @@ -332,13 +341,17 @@ var _ = framework.KubeDescribe("NodeProblemDetector [NodeFeature:NodeProblemDete Expect(err).NotTo(HaveOccurred()) } - By(fmt.Sprintf("Wait for %d events generated", test.events)) + By(fmt.Sprintf("Wait for %d temp events generated", test.tempEvents)) + Eventually(func() error { + return verifyEvents(c.CoreV1().Events(eventNamespace), eventListOptions, test.tempEvents, tempReason, tempMessage) + }, pollTimeout, pollInterval).Should(Succeed()) + By(fmt.Sprintf("Wait for %d total events generated", test.totalEvents)) Eventually(func() error { - return verifyEvents(c.CoreV1().Events(eventNamespace), eventListOptions, test.events, tempReason, tempMessage) + return verifyTotalEvents(c.CoreV1().Events(eventNamespace), eventListOptions, test.totalEvents) }, pollTimeout, pollInterval).Should(Succeed()) - By(fmt.Sprintf("Make sure only %d events generated", test.events)) + By(fmt.Sprintf("Make sure only %d total events generated", test.totalEvents)) Consistently(func() error { - return verifyEvents(c.CoreV1().Events(eventNamespace), eventListOptions, test.events, tempReason, tempMessage) + return verifyTotalEvents(c.CoreV1().Events(eventNamespace), eventListOptions, test.totalEvents) }, pollConsistent, pollInterval).Should(Succeed()) By(fmt.Sprintf("Make sure node condition %q is set", condition)) @@ -390,7 +403,7 @@ func injectLog(file string, timestamp time.Time, log string, num int) error { return nil } -// verifyEvents verifies there are num specific events generated +// verifyEvents verifies there are num specific events generated with given reason and message. 
func verifyEvents(e coreclientset.EventInterface, options metav1.ListOptions, num int, reason, message string) error { events, err := e.List(options) if err != nil { @@ -399,7 +412,7 @@ func verifyEvents(e coreclientset.EventInterface, options metav1.ListOptions, nu count := 0 for _, event := range events.Items { if event.Reason != reason || event.Message != message { - return fmt.Errorf("unexpected event: %v", event) + continue } count += int(event.Count) } @@ -409,14 +422,18 @@ func verifyEvents(e coreclientset.EventInterface, options metav1.ListOptions, nu return nil } -// verifyNoEvents verifies there is no event generated -func verifyNoEvents(e coreclientset.EventInterface, options metav1.ListOptions) error { +// verifyTotalEvents verifies there are num events in total. +func verifyTotalEvents(e coreclientset.EventInterface, options metav1.ListOptions, num int) error { events, err := e.List(options) if err != nil { return err } - if len(events.Items) != 0 { - return fmt.Errorf("unexpected events: %v", events.Items) + count := 0 + for _, event := range events.Items { + count += int(event.Count) + } + if count != num { + return fmt.Errorf("expect event number %d, got %d: %v", num, count, events.Items) } return nil } diff --git a/test/e2e_node/remote/cadvisor_e2e.go b/test/e2e_node/remote/cadvisor_e2e.go index 8bdb567d031a5..76ae79aff5048 100644 --- a/test/e2e_node/remote/cadvisor_e2e.go +++ b/test/e2e_node/remote/cadvisor_e2e.go @@ -63,7 +63,7 @@ func runCommand(command string, args ...string) error { } // RunTest implements TestSuite.RunTest -func (n *CAdvisorE2ERemote) RunTest(host, workspace, results, imageDesc, junitFilePrefix, testArgs, ginkgoArgs, systemSpecName string, timeout time.Duration) (string, error) { +func (n *CAdvisorE2ERemote) RunTest(host, workspace, results, imageDesc, junitFilePrefix, testArgs, ginkgoArgs, systemSpecName, extraEnvs string, timeout time.Duration) (string, error) { // Kill any running node processes cleanupNodeProcesses(host) 
diff --git a/test/e2e_node/remote/node_conformance.go b/test/e2e_node/remote/node_conformance.go index 9c78ae30887c3..3a6cf98ae44af 100644 --- a/test/e2e_node/remote/node_conformance.go +++ b/test/e2e_node/remote/node_conformance.go @@ -259,7 +259,7 @@ func stopKubelet(host, workspace string) error { } // RunTest runs test on the node. -func (c *ConformanceRemote) RunTest(host, workspace, results, imageDesc, junitFilePrefix, testArgs, _, systemSpecName string, timeout time.Duration) (string, error) { +func (c *ConformanceRemote) RunTest(host, workspace, results, imageDesc, junitFilePrefix, testArgs, _, systemSpecName, extraEnvs string, timeout time.Duration) (string, error) { // Install the cni plugins and add a basic CNI configuration. if err := setupCNI(host, workspace); err != nil { return "", err @@ -293,8 +293,8 @@ func (c *ConformanceRemote) RunTest(host, workspace, results, imageDesc, junitFi // Run the tests glog.V(2).Infof("Starting tests on %q", host) podManifestPath := getPodPath(workspace) - cmd := fmt.Sprintf("'timeout -k 30s %fs docker run --rm --privileged=true --net=host -v /:/rootfs -v %s:%s -v %s:/var/result -e TEST_ARGS=--report-prefix=%s %s'", - timeout.Seconds(), podManifestPath, podManifestPath, results, junitFilePrefix, getConformanceTestImageName(systemSpecName)) + cmd := fmt.Sprintf("'timeout -k 30s %fs docker run --rm --privileged=true --net=host -v /:/rootfs -v %s:%s -v %s:/var/result -e TEST_ARGS=--report-prefix=%s -e EXTRA_ENVS=%s %s'", + timeout.Seconds(), podManifestPath, podManifestPath, results, junitFilePrefix, extraEnvs, getConformanceTestImageName(systemSpecName)) testOutput, err := SSH(host, "sh", "-c", cmd) if err != nil { return testOutput, err diff --git a/test/e2e_node/remote/node_e2e.go b/test/e2e_node/remote/node_e2e.go index d6989740a3366..07f1b4300f224 100644 --- a/test/e2e_node/remote/node_e2e.go +++ b/test/e2e_node/remote/node_e2e.go @@ -135,7 +135,7 @@ func updateOSSpecificKubeletFlags(args, host, workspace string) 
(string, error) } // RunTest runs test on the node. -func (n *NodeE2ERemote) RunTest(host, workspace, results, imageDesc, junitFilePrefix, testArgs, ginkgoArgs, systemSpecName string, timeout time.Duration) (string, error) { +func (n *NodeE2ERemote) RunTest(host, workspace, results, imageDesc, junitFilePrefix, testArgs, ginkgoArgs, systemSpecName, extraEnvs string, timeout time.Duration) (string, error) { // Install the cni plugins and add a basic CNI configuration. // TODO(random-liu): Do this in cloud init after we remove containervm test. if err := setupCNI(host, workspace); err != nil { @@ -164,8 +164,8 @@ func (n *NodeE2ERemote) RunTest(host, workspace, results, imageDesc, junitFilePr glog.V(2).Infof("Starting tests on %q", host) cmd := getSSHCommand(" && ", fmt.Sprintf("cd %s", workspace), - fmt.Sprintf("timeout -k 30s %fs ./ginkgo %s ./e2e_node.test -- --system-spec-name=%s --system-spec-file=%s --logtostderr --v 4 --node-name=%s --report-dir=%s --report-prefix=%s --image-description=\"%s\" %s", - timeout.Seconds(), ginkgoArgs, systemSpecName, systemSpecFile, host, results, junitFilePrefix, imageDesc, testArgs), + fmt.Sprintf("timeout -k 30s %fs ./ginkgo %s ./e2e_node.test -- --system-spec-name=%s --system-spec-file=%s --extra-envs=%s --logtostderr --v 4 --node-name=%s --report-dir=%s --report-prefix=%s --image-description=\"%s\" %s", + timeout.Seconds(), ginkgoArgs, systemSpecName, systemSpecFile, extraEnvs, host, results, junitFilePrefix, imageDesc, testArgs), ) return SSH(host, "sh", "-c", cmd) } diff --git a/test/e2e_node/remote/remote.go b/test/e2e_node/remote/remote.go index 746899f8b57b7..47501d2977487 100644 --- a/test/e2e_node/remote/remote.go +++ b/test/e2e_node/remote/remote.go @@ -65,7 +65,7 @@ func CreateTestArchive(suite TestSuite, systemSpecName string) (string, error) { // Returns the command output, whether the exit was ok, and any errors // TODO(random-liu): junitFilePrefix is not prefix actually, the file name is junit-junitFilePrefix.xml. 
Change the variable name. -func RunRemote(suite TestSuite, archive string, host string, cleanup bool, imageDesc, junitFilePrefix string, testArgs string, ginkgoArgs string, systemSpecName string) (string, bool, error) { +func RunRemote(suite TestSuite, archive string, host string, cleanup bool, imageDesc, junitFilePrefix string, testArgs string, ginkgoArgs string, systemSpecName string, extraEnvs string) (string, bool, error) { // Create the temp staging directory glog.V(2).Infof("Staging test binaries on %q", host) workspace := newWorkspaceDir() @@ -110,7 +110,7 @@ func RunRemote(suite TestSuite, archive string, host string, cleanup bool, image } glog.V(2).Infof("Running test on %q", host) - output, err := suite.RunTest(host, workspace, resultDir, imageDesc, junitFilePrefix, testArgs, ginkgoArgs, systemSpecName, *testTimeoutSeconds) + output, err := suite.RunTest(host, workspace, resultDir, imageDesc, junitFilePrefix, testArgs, ginkgoArgs, systemSpecName, extraEnvs, *testTimeoutSeconds) aggErrs := []error{} // Do not log the output here, let the caller deal with the test output. diff --git a/test/e2e_node/remote/types.go b/test/e2e_node/remote/types.go index f7e360f7440bc..33d36fca5e9af 100644 --- a/test/e2e_node/remote/types.go +++ b/test/e2e_node/remote/types.go @@ -46,6 +46,7 @@ type TestSuite interface { // * ginkgoArgs is the arguments passed to ginkgo. // * systemSpecName is the name of the system spec used for validating the // image on which the test runs. + // * extraEnvs is the extra environment variables needed for node e2e tests. // * timeout is the test timeout. 
- RunTest(host, workspace, results, imageDesc, junitFilePrefix, testArgs, ginkgoArgs, systemSpecName string, timeout time.Duration) (string, error) + RunTest(host, workspace, results, imageDesc, junitFilePrefix, testArgs, ginkgoArgs, systemSpecName, extraEnvs string, timeout time.Duration) (string, error) } diff --git a/test/e2e_node/runner/local/run_local.go b/test/e2e_node/runner/local/run_local.go index bb3d95c6d9ef2..3a079ae98f2cb 100644 --- a/test/e2e_node/runner/local/run_local.go +++ b/test/e2e_node/runner/local/run_local.go @@ -35,6 +35,7 @@ var buildDependencies = flag.Bool("build-dependencies", true, "If true, build al var ginkgoFlags = flag.String("ginkgo-flags", "", "Space-separated list of arguments to pass to Ginkgo test runner.") var testFlags = flag.String("test-flags", "", "Space-separated list of arguments to pass to node e2e test.") var systemSpecName = flag.String("system-spec-name", "", fmt.Sprintf("The name of the system spec used for validating the image in the node conformance test. The specs are at %s. If unspecified, the default built-in spec (system.DefaultSpec) will be used.", system.SystemSpecPath)) +var extraEnvs = flag.String("extra-envs", "", "The extra environment variables needed for node e2e tests. 
Format: a list of key=value pairs, e.g., env1=val1,env2=val2") func main() { flag.Parse() @@ -62,7 +63,7 @@ func main() { glog.Fatalf("Failed to get k8s root directory: %v", err) } systemSpecFile := filepath.Join(rootDir, system.SystemSpecPath, *systemSpecName+".yaml") - args = append(args, fmt.Sprintf("--system-spec-name=%s --system-spec-file=%s", *systemSpecName, systemSpecFile)) + args = append(args, fmt.Sprintf("--system-spec-name=%s --system-spec-file=%s --extra-envs=%s", *systemSpecName, systemSpecFile, *extraEnvs)) } if err := runCommand(ginkgo, args...); err != nil { glog.Exitf("Test failed: %v", err) diff --git a/test/e2e_node/runner/remote/run_remote.go b/test/e2e_node/runner/remote/run_remote.go index ce96c9de055b6..cad0c688bfa4a 100644 --- a/test/e2e_node/runner/remote/run_remote.go +++ b/test/e2e_node/runner/remote/run_remote.go @@ -63,6 +63,7 @@ var instanceMetadata = flag.String("instance-metadata", "", "key/value metadata var gubernator = flag.Bool("gubernator", false, "If true, output Gubernator link to view logs") var ginkgoFlags = flag.String("ginkgo-flags", "", "Passed to ginkgo to specify additional flags such as --skip=.") var systemSpecName = flag.String("system-spec-name", "", fmt.Sprintf("The name of the system spec used for validating the image in the node conformance test. The specs are at %s. If unspecified, the default built-in spec (system.DefaultSpec) will be used.", system.SystemSpecPath)) +var extraEnvs = flag.String("extra-envs", "", "The extra environment variables needed for node e2e tests. Format: a list of key=value pairs, e.g., env1=val1,env2=val2") // envs is the type used to collect all node envs. 
The key is the env name, // and the value is the env value @@ -441,7 +442,7 @@ func testHost(host string, deleteFiles bool, imageDesc, junitFilePrefix, ginkgoF } } - output, exitOk, err := remote.RunRemote(suite, path, host, deleteFiles, imageDesc, junitFilePrefix, *testArgs, ginkgoFlagsStr, *systemSpecName) + output, exitOk, err := remote.RunRemote(suite, path, host, deleteFiles, imageDesc, junitFilePrefix, *testArgs, ginkgoFlagsStr, *systemSpecName, *extraEnvs) return &TestResult{ output: output, err: err, From fa15cfe2503962100cc2d3ab459c3fb331fe84d7 Mon Sep 17 00:00:00 2001 From: Michelle Au Date: Thu, 28 Feb 2019 14:44:48 -0800 Subject: [PATCH 38/96] bump repd min size in e2es --- test/e2e/storage/regional_pd.go | 19 ++++++++++--------- 1 file changed, 10 insertions(+), 9 deletions(-) diff --git a/test/e2e/storage/regional_pd.go b/test/e2e/storage/regional_pd.go index aae4b56ccd9b6..e154b05fac2c9 100644 --- a/test/e2e/storage/regional_pd.go +++ b/test/e2e/storage/regional_pd.go @@ -27,7 +27,7 @@ import ( "encoding/json" appsv1 "k8s.io/api/apps/v1" - "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" storage "k8s.io/api/storage/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -50,6 +50,7 @@ const ( pvDeletionTimeout = 3 * time.Minute statefulSetReadyTimeout = 3 * time.Minute taintKeyPrefix = "zoneTaint_" + repdMinSize = "200Gi" ) var _ = utils.SIGDescribe("Regional PD", func() { @@ -105,8 +106,8 @@ func testVolumeProvisioning(c clientset.Interface, ns string) { "zones": strings.Join(cloudZones, ","), "replication-type": "regional-pd", }, - claimSize: "1.5Gi", - expectedSize: "2Gi", + claimSize: repdMinSize, + expectedSize: repdMinSize, pvCheck: func(volume *v1.PersistentVolume) error { err := checkGCEPD(volume, "pd-standard") if err != nil { @@ -123,8 +124,8 @@ func testVolumeProvisioning(c clientset.Interface, ns string) { "type": "pd-standard", "replication-type": "regional-pd", }, - claimSize: "1.5Gi", - expectedSize: 
"2Gi", + claimSize: repdMinSize, + expectedSize: repdMinSize, pvCheck: func(volume *v1.PersistentVolume) error { err := checkGCEPD(volume, "pd-standard") if err != nil { @@ -304,7 +305,7 @@ func testRegionalDelayedBinding(c clientset.Interface, ns string) { "type": "pd-standard", "replication-type": "regional-pd", }, - claimSize: "2Gi", + claimSize: repdMinSize, delayBinding: true, } @@ -331,8 +332,8 @@ func testRegionalAllowedTopologies(c clientset.Interface, ns string) { "type": "pd-standard", "replication-type": "regional-pd", }, - claimSize: "2Gi", - expectedSize: "2Gi", + claimSize: repdMinSize, + expectedSize: repdMinSize, } suffix := "topo-regional" @@ -353,7 +354,7 @@ func testRegionalAllowedTopologiesWithDelayedBinding(c clientset.Interface, ns s "type": "pd-standard", "replication-type": "regional-pd", }, - claimSize: "2Gi", + claimSize: repdMinSize, delayBinding: true, } From aa026924f51c73509534de3b59d840d484fe7e02 Mon Sep 17 00:00:00 2001 From: Anago GCB Date: Mon, 25 Mar 2019 02:41:05 +0000 Subject: [PATCH 39/96] Kubernetes version v1.12.8-beta.0 openapi-spec file updates --- api/openapi-spec/swagger.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/api/openapi-spec/swagger.json b/api/openapi-spec/swagger.json index c3197d70b265b..4a867e381aab5 100644 --- a/api/openapi-spec/swagger.json +++ b/api/openapi-spec/swagger.json @@ -2,7 +2,7 @@ "swagger": "2.0", "info": { "title": "Kubernetes", - "version": "v1.12.7" + "version": "v1.12.8" }, "paths": { "/api/": { From ef2a79379d427b8bc3f1310d4295ae5dca288bc4 Mon Sep 17 00:00:00 2001 From: Anago GCB Date: Mon, 25 Mar 2019 04:10:16 +0000 Subject: [PATCH 40/96] Add/Update CHANGELOG-1.12.md for v1.12.7. 
--- CHANGELOG-1.12.md | 177 +++++++++++++++++++++++++++++++++++----------- 1 file changed, 134 insertions(+), 43 deletions(-) diff --git a/CHANGELOG-1.12.md b/CHANGELOG-1.12.md index 46619f99e70a6..ce04fceab3928 100644 --- a/CHANGELOG-1.12.md +++ b/CHANGELOG-1.12.md @@ -1,52 +1,59 @@ -- [v1.12.6](#v1126) - - [Downloads for v1.12.6](#downloads-for-v1126) +- [v1.12.7](#v1127) + - [Downloads for v1.12.7](#downloads-for-v1127) - [Client Binaries](#client-binaries) - [Server Binaries](#server-binaries) - [Node Binaries](#node-binaries) - - [Changelog since v1.12.5](#changelog-since-v1125) + - [Changelog since v1.12.6](#changelog-since-v1126) - [Other notable changes](#other-notable-changes) -- [v1.12.5](#v1125) - - [Downloads for v1.12.5](#downloads-for-v1125) +- [v1.12.6](#v1126) + - [Downloads for v1.12.6](#downloads-for-v1126) - [Client Binaries](#client-binaries-1) - [Server Binaries](#server-binaries-1) - [Node Binaries](#node-binaries-1) - - [Changelog since v1.12.4](#changelog-since-v1124) + - [Changelog since v1.12.5](#changelog-since-v1125) - [Other notable changes](#other-notable-changes-1) -- [v1.12.4](#v1124) - - [Downloads for v1.12.4](#downloads-for-v1124) +- [v1.12.5](#v1125) + - [Downloads for v1.12.5](#downloads-for-v1125) - [Client Binaries](#client-binaries-2) - [Server Binaries](#server-binaries-2) - [Node Binaries](#node-binaries-2) - - [Changelog since v1.12.3](#changelog-since-v1123) - - [Action Required](#action-required) + - [Changelog since v1.12.4](#changelog-since-v1124) - [Other notable changes](#other-notable-changes-2) -- [v1.12.3](#v1123) - - [Downloads for v1.12.3](#downloads-for-v1123) +- [v1.12.4](#v1124) + - [Downloads for v1.12.4](#downloads-for-v1124) - [Client Binaries](#client-binaries-3) - [Server Binaries](#server-binaries-3) - [Node Binaries](#node-binaries-3) - - [Changelog since v1.12.2](#changelog-since-v1122) + - [Changelog since v1.12.3](#changelog-since-v1123) + - [Action Required](#action-required) - [Other notable 
changes](#other-notable-changes-3) -- [v1.12.2](#v1122) - - [Downloads for v1.12.2](#downloads-for-v1122) +- [v1.12.3](#v1123) + - [Downloads for v1.12.3](#downloads-for-v1123) - [Client Binaries](#client-binaries-4) - [Server Binaries](#server-binaries-4) - [Node Binaries](#node-binaries-4) - - [Changelog since v1.12.1](#changelog-since-v1121) + - [Changelog since v1.12.2](#changelog-since-v1122) - [Other notable changes](#other-notable-changes-4) -- [v1.12.1](#v1121) - - [Downloads for v1.12.1](#downloads-for-v1121) +- [v1.12.2](#v1122) + - [Downloads for v1.12.2](#downloads-for-v1122) - [Client Binaries](#client-binaries-5) - [Server Binaries](#server-binaries-5) - [Node Binaries](#node-binaries-5) - - [Changelog since v1.12.0](#changelog-since-v1120) + - [Changelog since v1.12.1](#changelog-since-v1121) - [Other notable changes](#other-notable-changes-5) -- [v1.12.0](#v1120) - - [Downloads for v1.12.0](#downloads-for-v1120) +- [v1.12.1](#v1121) + - [Downloads for v1.12.1](#downloads-for-v1121) - [Client Binaries](#client-binaries-6) - [Server Binaries](#server-binaries-6) - [Node Binaries](#node-binaries-6) + - [Changelog since v1.12.0](#changelog-since-v1120) + - [Other notable changes](#other-notable-changes-6) +- [v1.12.0](#v1120) + - [Downloads for v1.12.0](#downloads-for-v1120) + - [Client Binaries](#client-binaries-7) + - [Server Binaries](#server-binaries-7) + - [Node Binaries](#node-binaries-7) - [Known Issues](#known-issues) - [Major Themes](#major-themes) - [SIG API Machinery](#sig-api-machinery) @@ -68,7 +75,7 @@ - [Deprecations and removals](#deprecations-and-removals) - [New Features](#new-features) - [API Changes](#api-changes) - - [Other Notable Changes](#other-notable-changes-6) + - [Other Notable Changes](#other-notable-changes-7) - [SIG API Machinery](#sig-api-machinery-1) - [SIG Apps](#sig-apps) - [SIG Auth](#sig-auth) @@ -87,54 +94,138 @@ - [SIG Storage](#sig-storage-1) - [SIG VMWare](#sig-vmware-1) - [SIG Windows](#sig-windows-1) - - [Other 
Notable Changes](#other-notable-changes-7) + - [Other Notable Changes](#other-notable-changes-8) - [Bug Fixes](#bug-fixes) - [Not Very Notable (that is, non-user-facing)](#not-very-notable-that-is-non-user-facing) - [External Dependencies](#external-dependencies) - [v1.12.0-rc.2](#v1120-rc2) - [Downloads for v1.12.0-rc.2](#downloads-for-v1120-rc2) - - [Client Binaries](#client-binaries-7) - - [Server Binaries](#server-binaries-7) - - [Node Binaries](#node-binaries-7) - - [Changelog since v1.12.0-rc.1](#changelog-since-v1120-rc1) - - [Other notable changes](#other-notable-changes-8) -- [v1.12.0-rc.1](#v1120-rc1) - - [Downloads for v1.12.0-rc.1](#downloads-for-v1120-rc1) - [Client Binaries](#client-binaries-8) - [Server Binaries](#server-binaries-8) - [Node Binaries](#node-binaries-8) - - [Changelog since v1.12.0-beta.2](#changelog-since-v1120-beta2) - - [Action Required](#action-required-2) + - [Changelog since v1.12.0-rc.1](#changelog-since-v1120-rc1) - [Other notable changes](#other-notable-changes-9) -- [v1.12.0-beta.2](#v1120-beta2) - - [Downloads for v1.12.0-beta.2](#downloads-for-v1120-beta2) +- [v1.12.0-rc.1](#v1120-rc1) + - [Downloads for v1.12.0-rc.1](#downloads-for-v1120-rc1) - [Client Binaries](#client-binaries-9) - [Server Binaries](#server-binaries-9) - [Node Binaries](#node-binaries-9) - - [Changelog since v1.12.0-beta.1](#changelog-since-v1120-beta1) - - [Action Required](#action-required-3) + - [Changelog since v1.12.0-beta.2](#changelog-since-v1120-beta2) + - [Action Required](#action-required-2) - [Other notable changes](#other-notable-changes-10) -- [v1.12.0-beta.1](#v1120-beta1) - - [Downloads for v1.12.0-beta.1](#downloads-for-v1120-beta1) +- [v1.12.0-beta.2](#v1120-beta2) + - [Downloads for v1.12.0-beta.2](#downloads-for-v1120-beta2) - [Client Binaries](#client-binaries-10) - [Server Binaries](#server-binaries-10) - [Node Binaries](#node-binaries-10) - - [Changelog since v1.12.0-alpha.1](#changelog-since-v1120-alpha1) - - [Action 
Required](#action-required-4) + - [Changelog since v1.12.0-beta.1](#changelog-since-v1120-beta1) + - [Action Required](#action-required-3) - [Other notable changes](#other-notable-changes-11) -- [v1.12.0-alpha.1](#v1120-alpha1) - - [Downloads for v1.12.0-alpha.1](#downloads-for-v1120-alpha1) +- [v1.12.0-beta.1](#v1120-beta1) + - [Downloads for v1.12.0-beta.1](#downloads-for-v1120-beta1) - [Client Binaries](#client-binaries-11) - [Server Binaries](#server-binaries-11) - [Node Binaries](#node-binaries-11) + - [Changelog since v1.12.0-alpha.1](#changelog-since-v1120-alpha1) + - [Action Required](#action-required-4) + - [Other notable changes](#other-notable-changes-12) +- [v1.12.0-alpha.1](#v1120-alpha1) + - [Downloads for v1.12.0-alpha.1](#downloads-for-v1120-alpha1) + - [Client Binaries](#client-binaries-12) + - [Server Binaries](#server-binaries-12) + - [Node Binaries](#node-binaries-12) - [Changelog since v1.11.0](#changelog-since-v1110) - [Action Required](#action-required-5) - - [Other notable changes](#other-notable-changes-12) + - [Other notable changes](#other-notable-changes-13) +# v1.12.7 + +[Documentation](https://docs.k8s.io) + +## Downloads for v1.12.7 + + +filename | sha512 hash +-------- | ----------- +[kubernetes.tar.gz](https://dl.k8s.io/v1.12.7/kubernetes.tar.gz) | `6fbbaf14e8a24f6ff415068ecc2ad7e0a4103da5d65330e4d01b91d0cb1df0473092eb8982dc584d8b3fb7f1504c761fdde7daa0d312c98b1833eff2b65994bf` +[kubernetes-src.tar.gz](https://dl.k8s.io/v1.12.7/kubernetes-src.tar.gz) | `67b308e1124b283b1e5da8b0bb03d530868aabd0c30905698ed9be52647cbecab8452bc8392c891a69ba793d42b3f4225ffdab1492b9094367f5e1d0134f7743` + +### Client Binaries + +filename | sha512 hash +-------- | ----------- +[kubernetes-client-darwin-386.tar.gz](https://dl.k8s.io/v1.12.7/kubernetes-client-darwin-386.tar.gz) | `f2b1b6aaaa57989bd21a5969ef86e9bc61115598c1d4587a5117a9040acae965994ed34fc1745417254b7cf34211f0c7a376cd16c9b43057769c3ac1cbc1e748` 
+[kubernetes-client-darwin-amd64.tar.gz](https://dl.k8s.io/v1.12.7/kubernetes-client-darwin-amd64.tar.gz) | `0967c05e48e06055276323905c29ccfd179d349bccdb03dbe1dd967fce95add2ae698294e6f5260fd4830d795311fc9145dd2258e8696a6cf81f333c99302578` +[kubernetes-client-linux-386.tar.gz](https://dl.k8s.io/v1.12.7/kubernetes-client-linux-386.tar.gz) | `e2791a7797d05b6e2885d990364b04fdf39bccda3b092d32e783eb3f60658229adce47dfdbb3480f1d50146eb386bff02d1118179f54e220c31586a123a95eff` +[kubernetes-client-linux-amd64.tar.gz](https://dl.k8s.io/v1.12.7/kubernetes-client-linux-amd64.tar.gz) | `da12d74a4df1dbef4a376ec233114ccc0f1477ad3ced9ca38f26967b3b24c1285924b2ea8b424cbf0af12f835ac05f90cadfb14270ed5588a7cdf1eb855408ee` +[kubernetes-client-linux-arm.tar.gz](https://dl.k8s.io/v1.12.7/kubernetes-client-linux-arm.tar.gz) | `23b4ddebbbfb7488bec894ea98efc77c8c13bdc055713f1d921d82450c01d95fa43c596f7e05eec6aca77b041e761f890f02107dca85ce75773d7c5e9ec8e0d4` +[kubernetes-client-linux-arm64.tar.gz](https://dl.k8s.io/v1.12.7/kubernetes-client-linux-arm64.tar.gz) | `f44e343e58b1904ea0a83cb628f09720b1d3c5968474f7115ca16f68a90deaae52ecb3f34b061366f97360a51bf65b5dd0ad29e5a61acc989b9a3ac600c1f889` +[kubernetes-client-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.12.7/kubernetes-client-linux-ppc64le.tar.gz) | `8171fcb1aac420083b69d99c73ebf565f178a20ffeb3f1e8243c028efaad566d51e95c24050d4eba792b13ea4244db7b7d5b474555393e4386e2c749b558aed6` +[kubernetes-client-linux-s390x.tar.gz](https://dl.k8s.io/v1.12.7/kubernetes-client-linux-s390x.tar.gz) | `38b6ebf862e56345d1cf3b16b7ba334e8a12890498c0cbd21ab1202597c4c3bb723abb166af830586888ab477bd943733a8225e05b41a9216879bd35d7d7801a` +[kubernetes-client-windows-386.tar.gz](https://dl.k8s.io/v1.12.7/kubernetes-client-windows-386.tar.gz) | `dea40564c0abed7959d7e446ed937122d773c365ca2684bc7c856eb2af2ddfecf1f16bc6a349d5c7a1f6dcf697eb1dad9e7d58cb60465f922fff6a635cf7608b` 
+[kubernetes-client-windows-amd64.tar.gz](https://dl.k8s.io/v1.12.7/kubernetes-client-windows-amd64.tar.gz) | `847068332ef5175e8c7a8c9b9850e4b00adea0135729b060632cbeaa69f8ab0fc5f1018f674cf56ca1d2769206a476ee8f31a8dcc65950a7c3c855dc45c6a150` + +### Server Binaries + +filename | sha512 hash +-------- | ----------- +[kubernetes-server-linux-amd64.tar.gz](https://dl.k8s.io/v1.12.7/kubernetes-server-linux-amd64.tar.gz) | `177185cb67ca546844a188747dc8c4430900911e4d7a46136b869482e8f18c14f0160ff5ae043ebf4baa1627f75f7dd7ea6338724b5cd8095ad9b10d46410474` +[kubernetes-server-linux-arm.tar.gz](https://dl.k8s.io/v1.12.7/kubernetes-server-linux-arm.tar.gz) | `5b38eed10d5ff9ded77352de91a4c3628df9f00bda3a8791d14e27db4a286b5f6877a48f20f74f058814d664d25f457386291e51dd3d50c7ccc8a89e8bc044be` +[kubernetes-server-linux-arm64.tar.gz](https://dl.k8s.io/v1.12.7/kubernetes-server-linux-arm64.tar.gz) | `bea4118d018c5ca92b8508503cdee6243f73d2c1351a2456de480609d2ddf083a784a099c53d591bc4c228872855d093ab9de330f94aff61aa3da97ef0d08039` +[kubernetes-server-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.12.7/kubernetes-server-linux-ppc64le.tar.gz) | `3424f5a22eb166b2c20393b53ce38e9c42c92a76b48f0032dc0190821f487a5a7f2e75fce9ecdbbfbca9370de637b54264ba2a6a3b40b0af43888a0db8060533` +[kubernetes-server-linux-s390x.tar.gz](https://dl.k8s.io/v1.12.7/kubernetes-server-linux-s390x.tar.gz) | `52b4b5366c1f3d372c293172136a9051cb9a3297967e009df2675763403a13128466370dbc825f6bd4a44f5df5ab7e44e9c7eddbcf8bf14a6a394b7b01d85e0f` + +### Node Binaries + +filename | sha512 hash +-------- | ----------- +[kubernetes-node-linux-amd64.tar.gz](https://dl.k8s.io/v1.12.7/kubernetes-node-linux-amd64.tar.gz) | `da84f9a9a4865efc8ac50c6dcbaf5aedb2206ec3e52cd26e7ea68f202ef466abf0230f0a704a85665eb542b2e37ac266556b7df3308e9c8939223acd5b61b940` +[kubernetes-node-linux-arm.tar.gz](https://dl.k8s.io/v1.12.7/kubernetes-node-linux-arm.tar.gz) | 
`4e6e04eaada99f1c035d5634c50c7f0ae08df96c8aab734efc1f5ae5294bb566fb7c60425219c5b8cc403d35bc79f1b721914f9549c728b7823afccec8ed1881` +[kubernetes-node-linux-arm64.tar.gz](https://dl.k8s.io/v1.12.7/kubernetes-node-linux-arm64.tar.gz) | `94ba017c3c9194d1675a2a9a340934646cbc63bc3e5acd12c0846c9c1cb93370b86d71d25e5424ec0d2212cb66e9b9ef8702cc48e289e14116d4340ffd51196a` +[kubernetes-node-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.12.7/kubernetes-node-linux-ppc64le.tar.gz) | `78590dda6404e8cfb0a261157081c91fad813cb028b7761fadca42ae192458ad7be49464f624d697524c686390103d479a9f753d91b48e6a81486d38f25e8fd9` +[kubernetes-node-linux-s390x.tar.gz](https://dl.k8s.io/v1.12.7/kubernetes-node-linux-s390x.tar.gz) | `cf61444ef93a0e51b0adc7d67959f70a455ab3eed3a4b62d95aeead14c2c7835d0a3bee70d8cf33c21031cc789b756b795d63a170bd7d54359e905efda3f2b19` +[kubernetes-node-windows-amd64.tar.gz](https://dl.k8s.io/v1.12.7/kubernetes-node-windows-amd64.tar.gz) | `01a4e0585375cbe562fc73cf00ac2b87ccbb4f3f62a8977c74f184ba9e53974b451b10b14b574735b9eb40a36ac38596b1f17efbd396173f251ac1e91f4e00fe` + +## Changelog since v1.12.6 + +### Other notable changes + +* Kubelet won't evict a static pod with priority `system-node-critical` upon resource pressure. ([#74222](https://github.com/kubernetes/kubernetes/pull/74222), [@Huang-Wei](https://github.com/Huang-Wei)) +* Re-issue Allocate grpc calls before starting a container that requests device-plugin resources if the cached state is missing. ([#73824](https://github.com/kubernetes/kubernetes/pull/73824), [@jiayingz](https://github.com/jiayingz)) +* Update Cluster Autoscaler version to 1.12.3. 
Release notes: https://github.com/kubernetes/autoscaler/releases/tag/cluster-autoscaler-1.12.3 ([#75182](https://github.com/kubernetes/kubernetes/pull/75182), [@losipiuk](https://github.com/losipiuk)) +* Restores --username and --password flags to kubectl ([#75451](https://github.com/kubernetes/kubernetes/pull/75451), [@liggitt](https://github.com/liggitt)) +* Bump debian-iptables image to v11.0.1 to fix security vulnerabilities. ([#75077](https://github.com/kubernetes/kubernetes/pull/75077), [@grayluck](https://github.com/grayluck)) +* Prevent AWS Network Load Balancer security groups ingress rules to be deleted by ensuring target groups are tagged. ([#73594](https://github.com/kubernetes/kubernetes/pull/73594), [@masterzen](https://github.com/masterzen)) +* Ensure Azure load balancer cleaned up on 404 or 403 when deleting LoadBalancer services. ([#75256](https://github.com/kubernetes/kubernetes/pull/75256), [@feiskyer](https://github.com/feiskyer)) +* Allow disable outbound SNAT when Azure standard load balancer is used together with outbound rules. 
([#75282](https://github.com/kubernetes/kubernetes/pull/75282), [@feiskyer](https://github.com/feiskyer)) +* Fix panic in kubectl cp command ([#75037](https://github.com/kubernetes/kubernetes/pull/75037), [@soltysh](https://github.com/soltysh)) +* Fix kubelet start failure issue on Azure Stack due to InstanceMetadata setting ([#74936](https://github.com/kubernetes/kubernetes/pull/74936), [@rjaini](https://github.com/rjaini)) +* fix parse devicePath issue on Azure Disk ([#74499](https://github.com/kubernetes/kubernetes/pull/74499), [@andyzhangx](https://github.com/andyzhangx)) +* fix mixed protocol issue for azure load balancer ([#74200](https://github.com/kubernetes/kubernetes/pull/74200), [@andyzhangx](https://github.com/andyzhangx)) +* fix issue: fail to detach azure disk when there is server side error ([#74398](https://github.com/kubernetes/kubernetes/pull/74398), [@andyzhangx](https://github.com/andyzhangx)) +* fix Azure Container Registry anonymous repo image pull error ([#74715](https://github.com/kubernetes/kubernetes/pull/74715), [@andyzhangx](https://github.com/andyzhangx)) +* Fixes an issue with missing apiVersion/kind in object data sent to admission webhooks ([#74448](https://github.com/kubernetes/kubernetes/pull/74448), [@liggitt](https://github.com/liggitt)) +* fix get azure accounts timeout issue when there is no out-bound IP ([#74191](https://github.com/kubernetes/kubernetes/pull/74191), [@andyzhangx](https://github.com/andyzhangx)) +* kubelet: resolved hang/timeout issues when running large numbers of pods with unique configmap/secret references ([#74755](https://github.com/kubernetes/kubernetes/pull/74755), [@liggitt](https://github.com/liggitt)) +* Reduce memory utilization of admission webhook metrics by removing resource related labels. 
([#69895](https://github.com/kubernetes/kubernetes/pull/69895), [@jpbetz](https://github.com/jpbetz)) +* This PR removes the following metrics: ([#74636](https://github.com/kubernetes/kubernetes/pull/74636), [@logicalhan](https://github.com/logicalhan)) + * reflector_items_per_list + * reflector_items_per_watch + * reflector_last_resource_version + * reflector_list_duration_seconds + * reflector_lists_total + * reflector_short_watches_total + * reflector_watch_duration_seconds + * reflector_watches_total + * While this is a backwards-incompatible change, it would have been impossible to setup reliable monitoring around these metrics since the labels were not stable. +* Fix keymutex issues which may crash in some platforms. ([#74386](https://github.com/kubernetes/kubernetes/pull/74386), [@danielqsj](https://github.com/danielqsj)) + + + # v1.12.6 [Documentation](https://docs.k8s.io) From 6ce9bb27032e2748add499725ff89dff09ed839b Mon Sep 17 00:00:00 2001 From: Fabio Rapposelli Date: Thu, 14 Mar 2019 18:57:26 +0100 Subject: [PATCH 41/96] stop vsphere cloud provider from spamming logs with `failed to patch IP` Fixes: #75236 --- pkg/cloudprovider/providers/vsphere/vsphere.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/cloudprovider/providers/vsphere/vsphere.go b/pkg/cloudprovider/providers/vsphere/vsphere.go index 33dd98154f76c..d51bb5c80daab 100644 --- a/pkg/cloudprovider/providers/vsphere/vsphere.go +++ b/pkg/cloudprovider/providers/vsphere/vsphere.go @@ -574,7 +574,7 @@ func getLocalIP() ([]v1.NodeAddress, error) { ) glog.V(4).Infof("Detected local IP address as %q", ipnet.IP.String()) } else { - glog.Warningf("Failed to patch IP as MAC address %q does not belong to a VMware platform", vmMACAddr) + glog.V(4).Infof("Failed to patch IP for interface %q as MAC address %q does not belong to a VMware platform", i.Name, vmMACAddr) } } } From ab8ee0eee88553c2972c8d7fc6e92e50a93ff0aa Mon Sep 17 00:00:00 2001 From: Laurent Bernaille Date: Tue, 12 Mar 
2019 09:40:55 +0100 Subject: [PATCH 42/96] Do not delete existing VS and RS when starting --- pkg/proxy/ipvs/graceful_termination.go | 16 ---------------- 1 file changed, 16 deletions(-) diff --git a/pkg/proxy/ipvs/graceful_termination.go b/pkg/proxy/ipvs/graceful_termination.go index ee5278c762312..bf2933afe9b0f 100644 --- a/pkg/proxy/ipvs/graceful_termination.go +++ b/pkg/proxy/ipvs/graceful_termination.go @@ -204,21 +204,5 @@ func (m *GracefulTerminationManager) MoveRSOutofGracefulDeleteList(uniqueRS stri // Run start a goroutine to try to delete rs in the graceful delete rsList with an interval 1 minute func (m *GracefulTerminationManager) Run() { - // before start, add leftover in delete rs to graceful delete rsList - vss, err := m.ipvs.GetVirtualServers() - if err != nil { - glog.Errorf("IPVS graceful delete manager failed to get IPVS virtualserver") - } - for _, vs := range vss { - rss, err := m.ipvs.GetRealServers(vs) - if err != nil { - glog.Errorf("IPVS graceful delete manager failed to get %v realserver", vs) - continue - } - for _, rs := range rss { - m.GracefulDeleteRS(vs, rs) - } - } - go wait.Until(m.tryDeleteRs, rsCheckDeleteInterval, wait.NeverStop) } From bbe46af67d429f603370afa11e10c9d8b37ef8ff Mon Sep 17 00:00:00 2001 From: Andrei Zhlobich Date: Thu, 29 Nov 2018 11:50:33 +0100 Subject: [PATCH 43/96] Fix updating 'currentMetrics' field for HPA with 'AverageValue' target --- pkg/controller/podautoscaler/horizontal.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/controller/podautoscaler/horizontal.go b/pkg/controller/podautoscaler/horizontal.go index 288c13f0b5df4..a39ca46b22cf9 100644 --- a/pkg/controller/podautoscaler/horizontal.go +++ b/pkg/controller/podautoscaler/horizontal.go @@ -377,7 +377,7 @@ func (a *HorizontalController) computeStatusForResourceMetric(currentReplicas in return 0, time.Time{}, "", fmt.Errorf("failed to get %s utilization: %v", metricSpec.Resource.Name, err) } metricNameProposal := fmt.Sprintf("%s 
resource", metricSpec.Resource.Name) - status = &autoscalingv2.MetricStatus{ + *status = autoscalingv2.MetricStatus{ Type: autoscalingv2.ResourceMetricSourceType, Resource: &autoscalingv2.ResourceMetricStatus{ Name: metricSpec.Resource.Name, From 96b3173becf880ef4b0d20b54c8abfc2ae9ee270 Mon Sep 17 00:00:00 2001 From: Solly Ross Date: Thu, 4 Oct 2018 12:48:18 -0400 Subject: [PATCH 44/96] Populate ClientCA in delegating auth setup kubernetes/kubernetes#67768 accidentally removed population of the the ClientCA in the delegating auth setup code. This restores it. --- .../src/k8s.io/apiserver/pkg/server/options/authentication.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/staging/src/k8s.io/apiserver/pkg/server/options/authentication.go b/staging/src/k8s.io/apiserver/pkg/server/options/authentication.go index 5c8209c35d0b3..043a934153812 100644 --- a/staging/src/k8s.io/apiserver/pkg/server/options/authentication.go +++ b/staging/src/k8s.io/apiserver/pkg/server/options/authentication.go @@ -192,9 +192,11 @@ func (s *DelegatingAuthenticationOptions) ApplyTo(c *server.AuthenticationInfo, } // configure AuthenticationInfo config + cfg.ClientCAFile = s.ClientCert.ClientCA if err = c.ApplyClientCert(s.ClientCert.ClientCA, servingInfo); err != nil { return fmt.Errorf("unable to load client CA file: %v", err) } + cfg.RequestHeaderConfig = s.RequestHeader.ToAuthenticationRequestHeaderConfig() if err = c.ApplyClientCert(s.RequestHeader.ClientCAFile, servingInfo); err != nil { return fmt.Errorf("unable to load client CA file: %v", err) From 3e45a05af48e05f76110f0b1054189129b7768fd Mon Sep 17 00:00:00 2001 From: Marek Siarkowicz Date: Thu, 14 Mar 2019 09:35:00 +0100 Subject: [PATCH 45/96] Update gcp images with security patches [stackdriver addon] Bump prometheus-to-sd to v0.5.0 to pick up security fixes. [fluentd-gcp addon] Bump fluentd-gcp-scaler to v0.5.1 to pick up security fixes. [fluentd-gcp addon] Bump event-exporter to v0.2.4 to pick up security fixes. 
[fluentd-gcp addon] Bump prometheus-to-sd to v0.5.0 to pick up security fixes. [metatada-proxy addon] Bump prometheus-to-sd v0.5.0 to pick up security fixes. --- .../stackdriver/heapster-controller.yaml | 2 +- cluster/addons/fluentd-gcp/event-exporter.yaml | 10 +++++----- cluster/addons/fluentd-gcp/fluentd-gcp-ds.yaml | 2 +- cluster/addons/fluentd-gcp/scaler-deployment.yaml | 4 ++-- cluster/addons/metadata-proxy/gce/metadata-proxy.yaml | 2 +- .../monitoring/custom_metrics_deployments.go | 2 +- 6 files changed, 11 insertions(+), 11 deletions(-) diff --git a/cluster/addons/cluster-monitoring/stackdriver/heapster-controller.yaml b/cluster/addons/cluster-monitoring/stackdriver/heapster-controller.yaml index e890b1fbcedfe..ed0b391399893 100644 --- a/cluster/addons/cluster-monitoring/stackdriver/heapster-controller.yaml +++ b/cluster/addons/cluster-monitoring/stackdriver/heapster-controller.yaml @@ -63,7 +63,7 @@ spec: - --sink=stackdriver:?cluster_name={{ cluster_name }}&use_old_resources={{ use_old_resources }}&use_new_resources={{ use_new_resources }}&min_interval_sec=100&batch_export_timeout_sec=110&cluster_location={{ cluster_location }} # BEGIN_PROMETHEUS_TO_SD - name: prom-to-sd - image: k8s.gcr.io/prometheus-to-sd:v0.3.1 + image: k8s.gcr.io/prometheus-to-sd:v0.5.0 command: - /monitor - --source=heapster:http://localhost:8082?whitelisted=stackdriver_requests_count,stackdriver_timeseries_count diff --git a/cluster/addons/fluentd-gcp/event-exporter.yaml b/cluster/addons/fluentd-gcp/event-exporter.yaml index cb914c721b3a7..8125618b0fb57 100644 --- a/cluster/addons/fluentd-gcp/event-exporter.yaml +++ b/cluster/addons/fluentd-gcp/event-exporter.yaml @@ -29,11 +29,11 @@ subjects: apiVersion: apps/v1beta1 kind: Deployment metadata: - name: event-exporter-v0.2.3 + name: event-exporter-v0.2.4 namespace: kube-system labels: k8s-app: event-exporter - version: v0.2.3 + version: v0.2.4 kubernetes.io/cluster-service: "true" addonmanager.kubernetes.io/mode: Reconcile spec: @@ 
-42,18 +42,18 @@ spec: metadata: labels: k8s-app: event-exporter - version: v0.2.3 + version: v0.2.4 spec: serviceAccountName: event-exporter-sa containers: - name: event-exporter - image: k8s.gcr.io/event-exporter:v0.2.3 + image: k8s.gcr.io/event-exporter:v0.2.4 command: - /event-exporter - -sink-opts=-stackdriver-resource-model={{ exporter_sd_resource_model }} # BEGIN_PROMETHEUS_TO_SD - name: prometheus-to-sd-exporter - image: k8s.gcr.io/prometheus-to-sd:v0.3.1 + image: k8s.gcr.io/prometheus-to-sd:v0.5.0 command: - /monitor - --stackdriver-prefix={{ prometheus_to_sd_prefix }}/addons diff --git a/cluster/addons/fluentd-gcp/fluentd-gcp-ds.yaml b/cluster/addons/fluentd-gcp/fluentd-gcp-ds.yaml index 6edb781c2cb00..c43d7f1fde92c 100644 --- a/cluster/addons/fluentd-gcp/fluentd-gcp-ds.yaml +++ b/cluster/addons/fluentd-gcp/fluentd-gcp-ds.yaml @@ -80,7 +80,7 @@ spec: fi; # BEGIN_PROMETHEUS_TO_SD - name: prometheus-to-sd-exporter - image: k8s.gcr.io/prometheus-to-sd:v0.3.1 + image: k8s.gcr.io/prometheus-to-sd:v0.5.0 command: - /monitor - --stackdriver-prefix={{ prometheus_to_sd_prefix }}/addons diff --git a/cluster/addons/fluentd-gcp/scaler-deployment.yaml b/cluster/addons/fluentd-gcp/scaler-deployment.yaml index 04e0e31b8278f..a111a91be7e38 100644 --- a/cluster/addons/fluentd-gcp/scaler-deployment.yaml +++ b/cluster/addons/fluentd-gcp/scaler-deployment.yaml @@ -5,7 +5,7 @@ metadata: namespace: kube-system labels: k8s-app: fluentd-gcp-scaler - version: v0.5.0 + version: v0.5.1 addonmanager.kubernetes.io/mode: Reconcile spec: selector: @@ -19,7 +19,7 @@ spec: serviceAccountName: fluentd-gcp-scaler containers: - name: fluentd-gcp-scaler - image: k8s.gcr.io/fluentd-gcp-scaler:0.5 + image: k8s.gcr.io/fluentd-gcp-scaler:0.5.1 command: - /scaler.sh - --ds-name=fluentd-gcp-{{ fluentd_gcp_yaml_version }} diff --git a/cluster/addons/metadata-proxy/gce/metadata-proxy.yaml b/cluster/addons/metadata-proxy/gce/metadata-proxy.yaml index e2d7a46fcf9ee..c883a9afb7af5 100644 --- 
a/cluster/addons/metadata-proxy/gce/metadata-proxy.yaml +++ b/cluster/addons/metadata-proxy/gce/metadata-proxy.yaml @@ -57,7 +57,7 @@ spec: cpu: "30m" # BEGIN_PROMETHEUS_TO_SD - name: prometheus-to-sd-exporter - image: k8s.gcr.io/prometheus-to-sd:v0.3.1 + image: k8s.gcr.io/prometheus-to-sd:v0.5.0 # Request and limit resources to get guaranteed QoS. resources: requests: diff --git a/test/e2e/instrumentation/monitoring/custom_metrics_deployments.go b/test/e2e/instrumentation/monitoring/custom_metrics_deployments.go index 3c59f6e488950..cac3184149574 100644 --- a/test/e2e/instrumentation/monitoring/custom_metrics_deployments.go +++ b/test/e2e/instrumentation/monitoring/custom_metrics_deployments.go @@ -211,7 +211,7 @@ func prometheusExporterPodSpec(metricName string, metricValue int64, port int32) }, { Name: "prometheus-to-sd", - Image: "k8s.gcr.io/prometheus-to-sd:v0.3.1", + Image: "k8s.gcr.io/prometheus-to-sd:v0.5.0", ImagePullPolicy: corev1.PullPolicy("Always"), Command: []string{"/monitor", fmt.Sprintf("--source=:http://localhost:%d", port), "--stackdriver-prefix=custom.googleapis.com", "--pod-id=$(POD_ID)", "--namespace-id=$(POD_NAMESPACE)"}, From 460a39c85cb5c2c84e26177993211df880b03fd6 Mon Sep 17 00:00:00 2001 From: Masaki Kimura Date: Mon, 1 Apr 2019 10:36:05 +0200 Subject: [PATCH 46/96] Fix AWS driver fails to provision specified fsType --- pkg/volume/aws_ebs/aws_util.go | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/pkg/volume/aws_ebs/aws_util.go b/pkg/volume/aws_ebs/aws_util.go index 95a5154a35195..864999a0ecbe6 100644 --- a/pkg/volume/aws_ebs/aws_util.go +++ b/pkg/volume/aws_ebs/aws_util.go @@ -117,8 +117,10 @@ func (util *AWSDiskUtil) CreateVolume(c *awsElasticBlockStoreProvisioner, node * } fstype := "" - if v, ok := c.options.Parameters[volume.VolumeParameterFSType]; ok { - fstype = v + for k, v := range c.options.Parameters { + if strings.ToLower(k) == volume.VolumeParameterFSType { + fstype = v + } } return name, 
volumeOptions.CapacityGB, labels, fstype, nil From 98acea3763b29c27754c3ae59467db2389c08da7 Mon Sep 17 00:00:00 2001 From: Cheng Xing Date: Tue, 26 Mar 2019 14:23:08 -0700 Subject: [PATCH 47/96] Updated regional PD minimum size; changed regional PD failover test to use StorageClassTest to generate PVC template --- test/e2e/storage/regional_pd.go | 57 ++++++++++----------------------- 1 file changed, 17 insertions(+), 40 deletions(-) diff --git a/test/e2e/storage/regional_pd.go b/test/e2e/storage/regional_pd.go index e154b05fac2c9..3c065078e9cf9 100644 --- a/test/e2e/storage/regional_pd.go +++ b/test/e2e/storage/regional_pd.go @@ -29,7 +29,6 @@ import ( appsv1 "k8s.io/api/apps/v1" v1 "k8s.io/api/core/v1" storage "k8s.io/api/storage/v1" - "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/types" @@ -51,6 +50,7 @@ const ( statefulSetReadyTimeout = 3 * time.Minute taintKeyPrefix = "zoneTaint_" repdMinSize = "200Gi" + pvcName = "regional-pd-vol" ) var _ = utils.SIGDescribe("Regional PD", func() { @@ -150,8 +150,21 @@ func testVolumeProvisioning(c clientset.Interface, ns string) { func testZonalFailover(c clientset.Interface, ns string) { cloudZones := getTwoRandomZones(c) - class := newRegionalStorageClass(ns, cloudZones) - claimTemplate := newClaimTemplate(ns) + testSpec := storageClassTest{ + name: "Regional PD Failover on GCE/GKE", + cloudProviders: []string{"gce", "gke"}, + provisioner: "kubernetes.io/gce-pd", + parameters: map[string]string{ + "type": "pd-standard", + "zones": strings.Join(cloudZones, ","), + "replication-type": "regional-pd", + }, + claimSize: repdMinSize, + expectedSize: repdMinSize, + } + class := newStorageClass(testSpec, ns, "" /* suffix */) + claimTemplate := newClaim(testSpec, ns, "" /* suffix */) + claimTemplate.Name = pvcName claimTemplate.Spec.StorageClassName = &class.Name statefulSet, service, regionalPDLabels := newStatefulSet(claimTemplate, ns) 
@@ -488,7 +501,7 @@ func newPodTemplate(labels map[string]string) *v1.PodTemplateSpec { Name: "web", }}, VolumeMounts: []v1.VolumeMount{{ - Name: "regional-pd-vol", + Name: pvcName, MountPath: "/mnt/data/regional-pd", }}, }, @@ -497,42 +510,6 @@ func newPodTemplate(labels map[string]string) *v1.PodTemplateSpec { } } -func newClaimTemplate(ns string) *v1.PersistentVolumeClaim { - return &v1.PersistentVolumeClaim{ - ObjectMeta: metav1.ObjectMeta{ - Name: "regional-pd-vol", - Namespace: ns, - }, - Spec: v1.PersistentVolumeClaimSpec{ - AccessModes: []v1.PersistentVolumeAccessMode{ - v1.ReadWriteOnce, - }, - Resources: v1.ResourceRequirements{ - Requests: v1.ResourceList{ - v1.ResourceName(v1.ResourceStorage): resource.MustParse("1Gi"), - }, - }, - }, - } -} - -func newRegionalStorageClass(namespace string, zones []string) *storage.StorageClass { - return &storage.StorageClass{ - TypeMeta: metav1.TypeMeta{ - Kind: "StorageClass", - }, - ObjectMeta: metav1.ObjectMeta{ - Name: namespace + "-sc", - }, - Provisioner: "kubernetes.io/gce-pd", - Parameters: map[string]string{ - "type": "pd-standard", - "zones": strings.Join(zones, ","), - "replication-type": "regional-pd", - }, - } -} - func getTwoRandomZones(c clientset.Interface) []string { zones, err := framework.GetClusterZones(c) Expect(err).ToNot(HaveOccurred()) From 4e64253111d604abe7cd239105739225ac28f708 Mon Sep 17 00:00:00 2001 From: yankaiz Date: Mon, 1 Apr 2019 18:26:03 -0700 Subject: [PATCH 48/96] Bump debian-iptables to v11.0.2 --- build/common.sh | 2 +- build/debian-base/Makefile | 2 +- build/debian-iptables/Makefile | 4 ++-- build/root/WORKSPACE | 4 ++-- 4 files changed, 6 insertions(+), 6 deletions(-) diff --git a/build/common.sh b/build/common.sh index bbe91da83e54f..dbf803ad1da7e 100755 --- a/build/common.sh +++ b/build/common.sh @@ -88,7 +88,7 @@ readonly KUBE_CONTAINER_RSYNC_PORT=8730 # # $1 - server architecture kube::build::get_docker_wrapped_binaries() { - debian_iptables_version=v11.0.1 + 
debian_iptables_version=v11.0.2 ### If you change any of these lists, please also update DOCKERIZED_BINARIES ### in build/BUILD. And kube::golang::server_image_targets case $1 in diff --git a/build/debian-base/Makefile b/build/debian-base/Makefile index 97899cde16918..543ae34910ecf 100755 --- a/build/debian-base/Makefile +++ b/build/debian-base/Makefile @@ -18,7 +18,7 @@ REGISTRY ?= staging-k8s.gcr.io IMAGE ?= $(REGISTRY)/debian-base BUILD_IMAGE ?= debian-build -TAG ?= 0.4.1 +TAG ?= v1.0.0 TAR_FILE ?= rootfs.tar ARCH?=amd64 diff --git a/build/debian-iptables/Makefile b/build/debian-iptables/Makefile index d112248068318..704e5e33bfa74 100644 --- a/build/debian-iptables/Makefile +++ b/build/debian-iptables/Makefile @@ -16,12 +16,12 @@ REGISTRY?="staging-k8s.gcr.io" IMAGE=$(REGISTRY)/debian-iptables -TAG?=v11.0.1 +TAG?=v11.0.2 ARCH?=amd64 ALL_ARCH = amd64 arm arm64 ppc64le s390x TEMP_DIR:=$(shell mktemp -d) -BASEIMAGE?=k8s.gcr.io/debian-base-$(ARCH):0.4.1 +BASEIMAGE?=k8s.gcr.io/debian-base-$(ARCH):v1.0.0 # This option is for running docker manifest command export DOCKER_CLI_EXPERIMENTAL := enabled diff --git a/build/root/WORKSPACE b/build/root/WORKSPACE index 94fbcc457c67d..5338b697fd83b 100644 --- a/build/root/WORKSPACE +++ b/build/root/WORKSPACE @@ -70,10 +70,10 @@ http_file( docker_pull( name = "debian-iptables-amd64", - digest = "sha256:9c41b4c326304b94eb96fdd2e181aa6e9995cc4642fcdfb570cedd73a419ba39", + digest = "sha256:adc40e9ec817c15d35b26d1d6aa4d0f8096fba4c99e26a026159bb0bc98c6a89", registry = "k8s.gcr.io", repository = "debian-iptables-amd64", - tag = "v11.0.1", # ignored, but kept here for documentation + tag = "v11.0.2", # ignored, but kept here for documentation ) docker_pull( From 95fa797d905a7a2e16f0b0535a9039ad90f117a6 Mon Sep 17 00:00:00 2001 From: Tom Wanielista Date: Wed, 27 Mar 2019 09:25:12 -0400 Subject: [PATCH 49/96] Avoid panic in cronjob sorting This change handles the case where the ith cronjob may have its start time set to nil. 
Previously, the Less method could cause a panic in case the ith cronjob had its start time set to nil, but the jth cronjob did not. It would panic when calling Before on a nil StartTime. --- pkg/controller/cronjob/utils.go | 9 ++-- pkg/controller/cronjob/utils_test.go | 65 ++++++++++++++++++++++++++++ 2 files changed, 70 insertions(+), 4 deletions(-) diff --git a/pkg/controller/cronjob/utils.go b/pkg/controller/cronjob/utils.go index 53db1d8e00886..93a3005dd27b7 100644 --- a/pkg/controller/cronjob/utils.go +++ b/pkg/controller/cronjob/utils.go @@ -219,13 +219,14 @@ func (o byJobStartTime) Len() int { return len(o) } func (o byJobStartTime) Swap(i, j int) { o[i], o[j] = o[j], o[i] } func (o byJobStartTime) Less(i, j int) bool { - if o[j].Status.StartTime == nil { - return o[i].Status.StartTime != nil + if o[i].Status.StartTime == nil && o[j].Status.StartTime != nil { + return false + } + if o[i].Status.StartTime != nil && o[j].Status.StartTime == nil { + return true } - if o[i].Status.StartTime.Equal(o[j].Status.StartTime) { return o[i].Name < o[j].Name } - return o[i].Status.StartTime.Before(o[j].Status.StartTime) } diff --git a/pkg/controller/cronjob/utils_test.go b/pkg/controller/cronjob/utils_test.go index fb6b569f2e67d..1e5ee4d036f60 100644 --- a/pkg/controller/cronjob/utils_test.go +++ b/pkg/controller/cronjob/utils_test.go @@ -17,6 +17,8 @@ limitations under the License. 
package cronjob import ( + "reflect" + "sort" "strings" "testing" "time" @@ -376,3 +378,66 @@ func TestGetRecentUnmetScheduleTimes(t *testing.T) { } } } + +func TestByJobStartTime(t *testing.T) { + now := metav1.NewTime(time.Date(2018, time.January, 1, 2, 3, 4, 5, time.UTC)) + later := metav1.NewTime(time.Date(2019, time.January, 1, 2, 3, 4, 5, time.UTC)) + aNil := batchv1.Job{ + ObjectMeta: metav1.ObjectMeta{Name: "a"}, + Status: batchv1.JobStatus{}, + } + bNil := batchv1.Job{ + ObjectMeta: metav1.ObjectMeta{Name: "b"}, + Status: batchv1.JobStatus{}, + } + aSet := batchv1.Job{ + ObjectMeta: metav1.ObjectMeta{Name: "a"}, + Status: batchv1.JobStatus{StartTime: &now}, + } + bSet := batchv1.Job{ + ObjectMeta: metav1.ObjectMeta{Name: "b"}, + Status: batchv1.JobStatus{StartTime: &now}, + } + aSetLater := batchv1.Job{ + ObjectMeta: metav1.ObjectMeta{Name: "a"}, + Status: batchv1.JobStatus{StartTime: &later}, + } + + testCases := []struct { + name string + input, expected []batchv1.Job + }{ + { + name: "both have nil start times", + input: []batchv1.Job{bNil, aNil}, + expected: []batchv1.Job{aNil, bNil}, + }, + { + name: "only the first has a nil start time", + input: []batchv1.Job{aNil, bSet}, + expected: []batchv1.Job{bSet, aNil}, + }, + { + name: "only the second has a nil start time", + input: []batchv1.Job{aSet, bNil}, + expected: []batchv1.Job{aSet, bNil}, + }, + { + name: "both have non-nil, equal start time", + input: []batchv1.Job{bSet, aSet}, + expected: []batchv1.Job{aSet, bSet}, + }, + { + name: "both have non-nil, different start time", + input: []batchv1.Job{aSetLater, bSet}, + expected: []batchv1.Job{bSet, aSetLater}, + }, + } + + for _, testCase := range testCases { + sort.Sort(byJobStartTime(testCase.input)) + if !reflect.DeepEqual(testCase.input, testCase.expected) { + t.Errorf("case: '%s', jobs not sorted as expected", testCase.name) + } + } +} From 506e8ab64b14dcc17bd4b20070bd965dc3cee615 Mon Sep 17 00:00:00 2001 From: Matthew Wong Date: Thu, 25 Oct 
2018 15:49:02 -0400 Subject: [PATCH 50/96] Add volume mode downgrade test: should not mount/map in <1.13 --- test/e2e/lifecycle/cluster_upgrade.go | 1 + test/e2e/storage/testsuites/volumemode.go | 50 +-------- test/e2e/storage/utils/utils.go | 42 ++++++++ test/e2e/upgrades/storage/BUILD | 8 +- test/e2e/upgrades/storage/volume_mode.go | 125 ++++++++++++++++++++++ 5 files changed, 179 insertions(+), 47 deletions(-) create mode 100644 test/e2e/upgrades/storage/volume_mode.go diff --git a/test/e2e/lifecycle/cluster_upgrade.go b/test/e2e/lifecycle/cluster_upgrade.go index c82c7e011cd0a..5d831351e5689 100644 --- a/test/e2e/lifecycle/cluster_upgrade.go +++ b/test/e2e/lifecycle/cluster_upgrade.go @@ -52,6 +52,7 @@ var upgradeTests = []upgrades.Test{ &apps.DaemonSetUpgradeTest{}, &upgrades.IngressUpgradeTest{}, &upgrades.AppArmorUpgradeTest{}, + &storage.VolumeModeDowngradeTest{}, } var gpuUpgradeTests = []upgrades.Test{ diff --git a/test/e2e/storage/testsuites/volumemode.go b/test/e2e/storage/testsuites/volumemode.go index 774e57a3ca0d8..1c89865f7bc0f 100644 --- a/test/e2e/storage/testsuites/volumemode.go +++ b/test/e2e/storage/testsuites/volumemode.go @@ -304,10 +304,10 @@ func testVolumeModeSuccessForPreprovisionedPV(input *volumeModeTestInput) { Expect(err).NotTo(HaveOccurred()) By("Checking if persistent volume exists as expected volume mode") - checkVolumeModeOfPath(pod, input.volMode, "/mnt/volume1") + utils.CheckVolumeModeOfPath(pod, input.volMode, "/mnt/volume1") By("Checking if read/write to persistent volume works properly") - checkReadWriteToPath(pod, input.volMode, "/mnt/volume1") + utils.CheckReadWriteToPath(pod, input.volMode, "/mnt/volume1") }) // TODO(mkimuram): Add more tests } @@ -366,10 +366,10 @@ func testVolumeModeSuccessForDynamicPV(input *volumeModeTestInput) { Expect(err).NotTo(HaveOccurred()) By("Checking if persistent volume exists as expected volume mode") - checkVolumeModeOfPath(pod, input.volMode, "/mnt/volume1") + 
utils.CheckVolumeModeOfPath(pod, input.volMode, "/mnt/volume1") By("Checking if read/write to persistent volume works properly") - checkReadWriteToPath(pod, input.volMode, "/mnt/volume1") + utils.CheckReadWriteToPath(pod, input.volMode, "/mnt/volume1") }) // TODO(mkimuram): Add more tests } @@ -401,45 +401,3 @@ func generateConfigsForPreprovisionedPVTest(scName string, volBindMode storagev1 return scConfig, pvConfig, pvcConfig } - -func checkVolumeModeOfPath(pod *v1.Pod, volMode v1.PersistentVolumeMode, path string) { - if volMode == v1.PersistentVolumeBlock { - // Check if block exists - utils.VerifyExecInPodSucceed(pod, fmt.Sprintf("test -b %s", path)) - - // Double check that it's not directory - utils.VerifyExecInPodFail(pod, fmt.Sprintf("test -d %s", path), 1) - } else { - // Check if directory exists - utils.VerifyExecInPodSucceed(pod, fmt.Sprintf("test -d %s", path)) - - // Double check that it's not block - utils.VerifyExecInPodFail(pod, fmt.Sprintf("test -b %s", path), 1) - } -} - -func checkReadWriteToPath(pod *v1.Pod, volMode v1.PersistentVolumeMode, path string) { - if volMode == v1.PersistentVolumeBlock { - // random -> file1 - utils.VerifyExecInPodSucceed(pod, "dd if=/dev/urandom of=/tmp/file1 bs=64 count=1") - // file1 -> dev (write to dev) - utils.VerifyExecInPodSucceed(pod, fmt.Sprintf("dd if=/tmp/file1 of=%s bs=64 count=1", path)) - // dev -> file2 (read from dev) - utils.VerifyExecInPodSucceed(pod, fmt.Sprintf("dd if=%s of=/tmp/file2 bs=64 count=1", path)) - // file1 == file2 (check contents) - utils.VerifyExecInPodSucceed(pod, "diff /tmp/file1 /tmp/file2") - // Clean up temp files - utils.VerifyExecInPodSucceed(pod, "rm -f /tmp/file1 /tmp/file2") - - // Check that writing file to block volume fails - utils.VerifyExecInPodFail(pod, fmt.Sprintf("echo 'Hello world.' > %s/file1.txt", path), 1) - } else { - // text -> file1 (write to file) - utils.VerifyExecInPodSucceed(pod, fmt.Sprintf("echo 'Hello world.' 
> %s/file1.txt", path)) - // grep file1 (read from file and check contents) - utils.VerifyExecInPodSucceed(pod, fmt.Sprintf("grep 'Hello world.' %s/file1.txt", path)) - - // Check that writing to directory as block volume fails - utils.VerifyExecInPodFail(pod, fmt.Sprintf("dd if=/dev/urandom of=%s bs=64 count=1", path), 1) - } -} diff --git a/test/e2e/storage/utils/utils.go b/test/e2e/storage/utils/utils.go index fac42ba91590d..791f1426a2b8c 100644 --- a/test/e2e/storage/utils/utils.go +++ b/test/e2e/storage/utils/utils.go @@ -441,3 +441,45 @@ func PrivilegedTestPSPClusterRoleBinding(client clientset.Interface, } } + +func CheckVolumeModeOfPath(pod *v1.Pod, volMode v1.PersistentVolumeMode, path string) { + if volMode == v1.PersistentVolumeBlock { + // Check if block exists + VerifyExecInPodSucceed(pod, fmt.Sprintf("test -b %s", path)) + + // Double check that it's not directory + VerifyExecInPodFail(pod, fmt.Sprintf("test -d %s", path), 1) + } else { + // Check if directory exists + VerifyExecInPodSucceed(pod, fmt.Sprintf("test -d %s", path)) + + // Double check that it's not block + VerifyExecInPodFail(pod, fmt.Sprintf("test -b %s", path), 1) + } +} + +func CheckReadWriteToPath(pod *v1.Pod, volMode v1.PersistentVolumeMode, path string) { + if volMode == v1.PersistentVolumeBlock { + // random -> file1 + VerifyExecInPodSucceed(pod, "dd if=/dev/urandom of=/tmp/file1 bs=64 count=1") + // file1 -> dev (write to dev) + VerifyExecInPodSucceed(pod, fmt.Sprintf("dd if=/tmp/file1 of=%s bs=64 count=1", path)) + // dev -> file2 (read from dev) + VerifyExecInPodSucceed(pod, fmt.Sprintf("dd if=%s of=/tmp/file2 bs=64 count=1", path)) + // file1 == file2 (check contents) + VerifyExecInPodSucceed(pod, "diff /tmp/file1 /tmp/file2") + // Clean up temp files + VerifyExecInPodSucceed(pod, "rm -f /tmp/file1 /tmp/file2") + + // Check that writing file to block volume fails + VerifyExecInPodFail(pod, fmt.Sprintf("echo 'Hello world.' 
> %s/file1.txt", path), 1) + } else { + // text -> file1 (write to file) + VerifyExecInPodSucceed(pod, fmt.Sprintf("echo 'Hello world.' > %s/file1.txt", path)) + // grep file1 (read from file and check contents) + VerifyExecInPodSucceed(pod, fmt.Sprintf("grep 'Hello world.' %s/file1.txt", path)) + + // Check that writing to directory as block volume fails + VerifyExecInPodFail(pod, fmt.Sprintf("dd if=/dev/urandom of=%s bs=64 count=1", path), 1) + } +} diff --git a/test/e2e/upgrades/storage/BUILD b/test/e2e/upgrades/storage/BUILD index ed5351c7a5cbd..45e6777cd2164 100644 --- a/test/e2e/upgrades/storage/BUILD +++ b/test/e2e/upgrades/storage/BUILD @@ -7,12 +7,18 @@ load( go_library( name = "go_default_library", - srcs = ["persistent_volumes.go"], + srcs = [ + "persistent_volumes.go", + "volume_mode.go", + ], importpath = "k8s.io/kubernetes/test/e2e/upgrades/storage", deps = [ + "//pkg/util/version:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/errors:go_default_library", "//test/e2e/framework:go_default_library", + "//test/e2e/storage/utils:go_default_library", "//test/e2e/upgrades:go_default_library", "//vendor/github.com/onsi/ginkgo:go_default_library", "//vendor/github.com/onsi/gomega:go_default_library", diff --git a/test/e2e/upgrades/storage/volume_mode.go b/test/e2e/upgrades/storage/volume_mode.go new file mode 100644 index 0000000000000..b7fd1a7c99fa6 --- /dev/null +++ b/test/e2e/upgrades/storage/volume_mode.go @@ -0,0 +1,125 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package storage + +import ( + "fmt" + "time" + + "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/kubernetes/pkg/util/version" + "k8s.io/kubernetes/test/e2e/framework" + "k8s.io/kubernetes/test/e2e/storage/utils" + "k8s.io/kubernetes/test/e2e/upgrades" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" +) + +const devicePath = "/mnt/volume1" + +// VolumeModeDowngradeTest tests that a VolumeMode Block PV is not mistakenly +// formatted and mounted like a nil/Filesystem PV after a downgrade to a version +// where the BlockVolume feature is disabled +type VolumeModeDowngradeTest struct { + pvSource *v1.PersistentVolumeSource + pv *v1.PersistentVolume + pvc *v1.PersistentVolumeClaim + pod *v1.Pod +} + +func (VolumeModeDowngradeTest) Name() string { + return "[sig-storage] volume-mode-downgrade" +} + +func (t *VolumeModeDowngradeTest) Skip(upgCtx upgrades.UpgradeContext) bool { + if !framework.ProviderIs("openstack", "gce", "aws", "gke", "vsphere", "azure") { + return true + } + + // Only run when downgrading from >= 1.13 to < 1.13 + blockVersion := version.MustParseSemantic("1.13.0-alpha.0") + if upgCtx.Versions[0].Version.LessThan(blockVersion) { + return true + } + if !upgCtx.Versions[1].Version.LessThan(blockVersion) { + return true + } + + return false +} + +// Setup creates a block pv and then verifies that a pod can consume it. The pod writes data to the volume. 
+func (t *VolumeModeDowngradeTest) Setup(f *framework.Framework) { + + var err error + + cs := f.ClientSet + ns := f.Namespace.Name + + By("Creating a PVC") + block := v1.PersistentVolumeBlock + pvcConfig := framework.PersistentVolumeClaimConfig{ + StorageClassName: nil, + VolumeMode: &block, + } + t.pvc = framework.MakePersistentVolumeClaim(pvcConfig, ns) + t.pvc, err = framework.CreatePVC(cs, ns, t.pvc) + Expect(err).NotTo(HaveOccurred()) + + err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, cs, ns, t.pvc.Name, framework.Poll, framework.ClaimProvisionTimeout) + Expect(err).NotTo(HaveOccurred()) + + t.pvc, err = cs.CoreV1().PersistentVolumeClaims(t.pvc.Namespace).Get(t.pvc.Name, metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + + t.pv, err = cs.CoreV1().PersistentVolumes().Get(t.pvc.Spec.VolumeName, metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + + By("Consuming the PVC before downgrade") + t.pod, err = framework.CreateSecPod(cs, ns, []*v1.PersistentVolumeClaim{t.pvc}, false, "", false, false, framework.SELinuxLabel, nil, framework.PodStartTimeout) + Expect(err).NotTo(HaveOccurred()) + + By("Checking if PV exists as expected volume mode") + utils.CheckVolumeModeOfPath(t.pod, block, devicePath) + + By("Checking if read/write to PV works properly") + utils.CheckReadWriteToPath(t.pod, block, devicePath) +} + +// Test waits for the downgrade to complete, and then verifies that a pod can no +// longer consume the pv as it is not mapped nor mounted into the pod +func (t *VolumeModeDowngradeTest) Test(f *framework.Framework, done <-chan struct{}, upgrade upgrades.UpgradeType) { + By("Waiting for downgrade to finish") + <-done + + By("Verifying that nothing exists at the device path in the pod") + utils.VerifyExecInPodFail(t.pod, fmt.Sprintf("test -e %s", devicePath), 1) +} + +// Teardown cleans up any remaining resources. 
+func (t *VolumeModeDowngradeTest) Teardown(f *framework.Framework) { + By("Deleting the pod") + framework.ExpectNoError(framework.DeletePodWithWait(f, f.ClientSet, t.pod)) + + By("Deleting the PVC") + framework.ExpectNoError(f.ClientSet.CoreV1().PersistentVolumeClaims(t.pvc.Namespace).Delete(t.pvc.Name, nil)) + + By("Waiting for the PV to be deleted") + framework.ExpectNoError(framework.WaitForPersistentVolumeDeleted(f.ClientSet, t.pv.Name, 5*time.Second, 20*time.Minute)) +} From 97d45edfbe5a6335ba4bb2af1f88a6a33b01d815 Mon Sep 17 00:00:00 2001 From: Minhan Xia Date: Fri, 15 Mar 2019 10:18:45 -0700 Subject: [PATCH 51/96] disable HTTP2 ingress test --- test/e2e/network/ingress.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/test/e2e/network/ingress.go b/test/e2e/network/ingress.go index 33e326247da56..8ceb760c89aaf 100644 --- a/test/e2e/network/ingress.go +++ b/test/e2e/network/ingress.go @@ -419,7 +419,8 @@ var _ = SIGDescribe("Loadbalancing: L7", func() { // TODO(nikhiljindal): Check the instance group annotation value and verify with a multizone cluster. 
}) - It("should be able to switch between HTTPS and HTTP2 modes", func() { + // TODO: remove [Unreleased] tag once the new GCE API GO client gets revendored in ingress-gce repo + It("should be able to switch between HTTPS and HTTP2 modes [Unreleased]", func() { httpsScheme := "request_scheme=https" By("Create a basic HTTP2 ingress") From e8f23e8c048d2cab92977413f4e3cb932a86b43d Mon Sep 17 00:00:00 2001 From: "GRECO, FRANK" Date: Fri, 5 Apr 2019 14:18:10 -0700 Subject: [PATCH 52/96] ensuring that logic is checking for differences in listener --- pkg/cloudprovider/providers/aws/aws_loadbalancer.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/pkg/cloudprovider/providers/aws/aws_loadbalancer.go b/pkg/cloudprovider/providers/aws/aws_loadbalancer.go index 5668e5ac1d7a4..783df48f785e5 100644 --- a/pkg/cloudprovider/providers/aws/aws_loadbalancer.go +++ b/pkg/cloudprovider/providers/aws/aws_loadbalancer.go @@ -1049,10 +1049,10 @@ func (c *Cloud) ensureLoadBalancer(namespacedName types.NamespacedName, loadBala found := -1 for i, expected := range listeners { - if elbProtocolsAreEqual(actual.Protocol, expected.Protocol) { + if !elbProtocolsAreEqual(actual.Protocol, expected.Protocol) { continue } - if elbProtocolsAreEqual(actual.InstanceProtocol, expected.InstanceProtocol) { + if !elbProtocolsAreEqual(actual.InstanceProtocol, expected.InstanceProtocol) { continue } if aws.Int64Value(actual.InstancePort) != aws.Int64Value(expected.InstancePort) { @@ -1061,7 +1061,7 @@ func (c *Cloud) ensureLoadBalancer(namespacedName types.NamespacedName, loadBala if aws.Int64Value(actual.LoadBalancerPort) != aws.Int64Value(expected.LoadBalancerPort) { continue } - if awsArnEquals(actual.SSLCertificateId, expected.SSLCertificateId) { + if !awsArnEquals(actual.SSLCertificateId, expected.SSLCertificateId) { continue } found = i From 2ca6dd2cc8b8505ae2cf403c08a5772db7e03ba2 Mon Sep 17 00:00:00 2001 From: Zhen Wang Date: Fri, 5 Apr 2019 10:56:00 -0700 Subject: [PATCH 
53/96] Use Node-Problem-Detector v0.6.3 on GCI --- cluster/gce/gci/configure.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cluster/gce/gci/configure.sh b/cluster/gce/gci/configure.sh index 4fd3cb58a53b6..a745419490c8b 100644 --- a/cluster/gce/gci/configure.sh +++ b/cluster/gce/gci/configure.sh @@ -26,8 +26,8 @@ set -o pipefail ### Hardcoded constants DEFAULT_CNI_VERSION="v0.7.5" DEFAULT_CNI_SHA1="52e9d2de8a5f927307d9397308735658ee44ab8d" -DEFAULT_NPD_VERSION="v0.6.0" -DEFAULT_NPD_SHA1="a28e960a21bb74bc0ae09c267b6a340f30e5b3a6" +DEFAULT_NPD_VERSION="v0.6.3" +DEFAULT_NPD_SHA1="3a6ac56be6c121f1b94450bfd1a81ad28d532369" DEFAULT_CRICTL_VERSION="v1.12.0" DEFAULT_CRICTL_SHA1="82ef8b44849f9da0589c87e9865d4716573eec7f" DEFAULT_MOUNTER_TAR_SHA="8003b798cf33c7f91320cd6ee5cec4fa22244571" From 4e4ceb9676e3e0786f47e04320f4d87851525821 Mon Sep 17 00:00:00 2001 From: Krzysztof Jastrzebski Date: Wed, 3 Apr 2019 00:24:26 +0200 Subject: [PATCH 54/96] Delete only unscheduled pods if node doesn't exist anymore. --- pkg/controller/daemon/daemon_controller.go | 13 ++++++----- .../daemon/daemon_controller_test.go | 22 ++++++++++++++++++- 2 files changed, 29 insertions(+), 6 deletions(-) diff --git a/pkg/controller/daemon/daemon_controller.go b/pkg/controller/daemon/daemon_controller.go index 99b1f2b7f85a5..4eecb2c6ccf56 100644 --- a/pkg/controller/daemon/daemon_controller.go +++ b/pkg/controller/daemon/daemon_controller.go @@ -967,10 +967,10 @@ func (dsc *DaemonSetsController) manage(ds *apps.DaemonSet, hash string) error { failedPodsObserved += failedPodsObservedOnNode } - // Remove pods assigned to not existing nodes when daemonset pods are scheduled by default scheduler. + // Remove unscheduled pods assigned to not existing nodes when daemonset pods are scheduled by scheduler. // If node doesn't exist then pods are never scheduled and can't be deleted by PodGCController. 
if utilfeature.DefaultFeatureGate.Enabled(features.ScheduleDaemonSetPods) { - podsToDelete = append(podsToDelete, getPodsWithoutNode(nodeList, nodeToDaemonPods)...) + podsToDelete = append(podsToDelete, getUnscheduledPodsWithoutNode(nodeList, nodeToDaemonPods)...) } // Label new pods using the hash label value of the current history when creating them @@ -1555,8 +1555,9 @@ func failedPodsBackoffKey(ds *apps.DaemonSet, nodeName string) string { return fmt.Sprintf("%s/%d/%s", ds.UID, ds.Status.ObservedGeneration, nodeName) } -// getPodsWithoutNode returns list of pods assigned to not existing nodes. -func getPodsWithoutNode(runningNodesList []*v1.Node, nodeToDaemonPods map[string][]*v1.Pod) []string { +// getUnscheduledPodsWithoutNode returns list of unscheduled pods assigned to not existing nodes. +// Returned pods can't be deleted by PodGCController so they should be deleted by DaemonSetController. +func getUnscheduledPodsWithoutNode(runningNodesList []*v1.Node, nodeToDaemonPods map[string][]*v1.Pod) []string { var results []string isNodeRunning := make(map[string]bool) for _, node := range runningNodesList { @@ -1565,7 +1566,9 @@ func getPodsWithoutNode(runningNodesList []*v1.Node, nodeToDaemonPods map[string for n, pods := range nodeToDaemonPods { if !isNodeRunning[n] { for _, pod := range pods { - results = append(results, pod.Name) + if len(pod.Spec.NodeName) == 0 { + results = append(results, pod.Name) + } } } } diff --git a/pkg/controller/daemon/daemon_controller_test.go b/pkg/controller/daemon/daemon_controller_test.go index fae6c480b6904..064ef7920bdf4 100644 --- a/pkg/controller/daemon/daemon_controller_test.go +++ b/pkg/controller/daemon/daemon_controller_test.go @@ -2664,7 +2664,7 @@ func TestDeleteNoDaemonPod(t *testing.T) { } } -func TestDeletePodForNotExistingNode(t *testing.T) { +func TestDeleteUnscheduledPodForNotExistingNode(t *testing.T) { for _, f := range []bool{true, false} { setFeatureGate(t, features.ScheduleDaemonSetPods, f) for _, strategy 
:= range updateStrategies() { @@ -2678,6 +2678,26 @@ func TestDeletePodForNotExistingNode(t *testing.T) { addNodes(manager.nodeStore, 0, 1, nil) addPods(manager.podStore, "node-0", simpleDaemonSetLabel, ds, 1) addPods(manager.podStore, "node-1", simpleDaemonSetLabel, ds, 1) + + podScheduledUsingAffinity := newPod("pod1-node-3", "", simpleDaemonSetLabel, ds) + podScheduledUsingAffinity.Spec.Affinity = &v1.Affinity{ + NodeAffinity: &v1.NodeAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{ + NodeSelectorTerms: []v1.NodeSelectorTerm{ + { + MatchFields: []v1.NodeSelectorRequirement{ + { + Key: algorithm.NodeFieldSelectorKeyNodeName, + Operator: v1.NodeSelectorOpIn, + Values: []string{"node-2"}, + }, + }, + }, + }, + }, + }, + } + manager.podStore.Add(podScheduledUsingAffinity) if f { syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 1, 0) } else { From b262180fc91e712b8d804a9ab0207ad37133fc16 Mon Sep 17 00:00:00 2001 From: Igor German Date: Tue, 9 Apr 2019 14:58:49 +0300 Subject: [PATCH 55/96] proxy: Take into account exclude CIDRs while deleting legacy real servers --- pkg/proxy/ipvs/proxier.go | 53 ++++++++++++----------- pkg/proxy/ipvs/proxier_test.go | 79 ++++++++++++++++++++++++++++++++++ 2 files changed, 107 insertions(+), 25 deletions(-) diff --git a/pkg/proxy/ipvs/proxier.go b/pkg/proxy/ipvs/proxier.go index 44c72f1d14dc1..942c4f463a964 100644 --- a/pkg/proxy/ipvs/proxier.go +++ b/pkg/proxy/ipvs/proxier.go @@ -1623,15 +1623,17 @@ func (proxier *Proxier) syncEndpoint(svcPortName proxy.ServicePortName, onlyNode func (proxier *Proxier) cleanLegacyService(activeServices map[string]bool, currentServices map[string]*utilipvs.VirtualServer, legacyBindAddrs map[string]bool) { for cs := range currentServices { svc := currentServices[cs] + if proxier.isIPInExcludeCIDRs(svc.Address) { + continue + } if _, ok := activeServices[cs]; !ok { - // This service was not processed in the latest sync loop so before deleting it, - okayToDelete := 
true rsList, _ := proxier.ipvs.GetRealServers(svc) // If we still have real servers graceful termination is not done if len(rsList) > 0 { - okayToDelete = false + continue } + // Applying graceful termination to all real servers for _, rs := range rsList { uniqueRS := GetUniqueRSName(svc, rs) @@ -1644,35 +1646,36 @@ func (proxier *Proxier) cleanLegacyService(activeServices map[string]bool, curre glog.Errorf("Failed to delete destination: %v, error: %v", uniqueRS, err) } } - // make sure it does not fall within an excluded CIDR range. - for _, excludedCIDR := range proxier.excludeCIDRs { - // Any validation of this CIDR already should have occurred. - _, n, _ := net.ParseCIDR(excludedCIDR) - if n.Contains(svc.Address) { - okayToDelete = false - break - } + glog.V(4).Infof("Delete service %s", svc.String()) + if err := proxier.ipvs.DeleteVirtualServer(svc); err != nil { + glog.Errorf("Failed to delete service %s, error: %v", svc.String(), err) } - if okayToDelete { - glog.V(4).Infof("Delete service %s", svc.String()) - if err := proxier.ipvs.DeleteVirtualServer(svc); err != nil { - glog.Errorf("Failed to delete service %s, error: %v", svc.String(), err) - } - addr := svc.Address.String() - if _, ok := legacyBindAddrs[addr]; ok { - glog.V(4).Infof("Unbinding address %s", addr) - if err := proxier.netlinkHandle.UnbindAddress(addr, DefaultDummyDevice); err != nil { - glog.Errorf("Failed to unbind service addr %s from dummy interface %s: %v", addr, DefaultDummyDevice, err) - } else { - // In case we delete a multi-port service, avoid trying to unbind multiple times - delete(legacyBindAddrs, addr) - } + addr := svc.Address.String() + if _, ok := legacyBindAddrs[addr]; ok { + glog.V(4).Infof("Unbinding address %s", addr) + if err := proxier.netlinkHandle.UnbindAddress(addr, DefaultDummyDevice); err != nil { + glog.Errorf("Failed to unbind service addr %s from dummy interface %s: %v", addr, DefaultDummyDevice, err) + } else { + // In case we delete a multi-port service, 
avoid trying to unbind multiple times + delete(legacyBindAddrs, addr) } } } } } +func (proxier *Proxier) isIPInExcludeCIDRs(ip net.IP) bool { + // make sure it does not fall within an excluded CIDR range. + for _, excludedCIDR := range proxier.excludeCIDRs { + // Any validation of this CIDR already should have occurred. + _, n, _ := net.ParseCIDR(excludedCIDR) + if n.Contains(ip) { + return true + } + } + return false +} + func (proxier *Proxier) getLegacyBindAddr(activeBindAddrs map[string]bool, currentBindAddrs []string) map[string]bool { legacyAddrs := make(map[string]bool) for _, addr := range currentBindAddrs { diff --git a/pkg/proxy/ipvs/proxier_test.go b/pkg/proxy/ipvs/proxier_test.go index 2e4ef46702f2c..61aec14c612ad 100644 --- a/pkg/proxy/ipvs/proxier_test.go +++ b/pkg/proxy/ipvs/proxier_test.go @@ -2906,3 +2906,82 @@ func TestCleanLegacyService(t *testing.T) { } } } + +func TestCleanLegacyRealServersExcludeCIDRs(t *testing.T) { + ipt := iptablestest.NewFake() + ipvs := ipvstest.NewFake() + ipset := ipsettest.NewFake(testIPSetVersion) + gtm := NewGracefulTerminationManager(ipvs) + + excludeCIDRs := []string{"4.4.4.4/32"} + proxier, err := NewProxier( + ipt, + ipvs, + ipset, + NewFakeSysctl(), + exec.New(), + 250*time.Millisecond, + 100*time.Millisecond, + excludeCIDRs, + false, + 0, + "10.0.0.0/24", + testHostname, + net.ParseIP("127.0.0.1"), + nil, + nil, + DefaultScheduler, + make([]string, 0), + ) + if err != nil { + t.Errorf("Unexpected error: %v", err) + } + + proxier.gracefuldeleteManager = gtm + + vs := &utilipvs.VirtualServer{ + Address: net.ParseIP("4.4.4.4"), + Protocol: string(v1.ProtocolUDP), + Port: 56, + Scheduler: "rr", + Flags: utilipvs.FlagHashed, + } + + proxier.ipvs.AddVirtualServer(vs) + + rss := []*utilipvs.RealServer{ + { + Address: net.ParseIP("10.10.10.10"), + Port: 56, + ActiveConn: 0, + InactiveConn: 0, + }, + { + Address: net.ParseIP("11.11.11.11"), + Port: 56, + ActiveConn: 0, + InactiveConn: 0, + }, + } + for _, rs := range 
rss { + proxier.ipvs.AddRealServer(vs, rs) + } + + proxier.netlinkHandle.EnsureDummyDevice(DefaultDummyDevice) + + proxier.netlinkHandle.EnsureAddressBind("4.4.4.4", DefaultDummyDevice) + + proxier.cleanLegacyService( + map[string]bool{}, + map[string]*utilipvs.VirtualServer{"ipvs0": vs}, + map[string]bool{"4.4.4.4": true}, + ) + + proxier.gracefuldeleteManager.tryDeleteRs() + + remainingRealServers, _ := proxier.ipvs.GetRealServers(vs) + + if len(remainingRealServers) != 2 { + t.Errorf("Expected number of remaining IPVS real servers after cleanup should be %v. Got %v", 2, len(remainingRealServers)) + } +} From 5eae1b7c0a0a4c17540b6a3a7881c13d18e2d047 Mon Sep 17 00:00:00 2001 From: Pengfei Ni Date: Mon, 7 Jan 2019 13:08:18 +0800 Subject: [PATCH 56/96] Increase default maximumLoadBalancerRuleCount to 250 --- pkg/cloudprovider/providers/azure/azure.go | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) diff --git a/pkg/cloudprovider/providers/azure/azure.go b/pkg/cloudprovider/providers/azure/azure.go index 6b3a679574209..05a076ee9459f 100644 --- a/pkg/cloudprovider/providers/azure/azure.go +++ b/pkg/cloudprovider/providers/azure/azure.go @@ -48,14 +48,15 @@ import ( const ( // CloudProviderName is the value used for the --cloud-provider flag - CloudProviderName = "azure" - rateLimitQPSDefault = 1.0 - rateLimitBucketDefault = 5 - backoffRetriesDefault = 6 - backoffExponentDefault = 1.5 - backoffDurationDefault = 5 // in seconds - backoffJitterDefault = 1.0 - maximumLoadBalancerRuleCount = 148 // According to Azure LB rule default limit + CloudProviderName = "azure" + rateLimitQPSDefault = 1.0 + rateLimitBucketDefault = 5 + backoffRetriesDefault = 6 + backoffExponentDefault = 1.5 + backoffDurationDefault = 5 // in seconds + backoffJitterDefault = 1.0 + // According to https://docs.microsoft.com/en-us/azure/azure-subscription-service-limits#load-balancer. 
+ maximumLoadBalancerRuleCount = 250 vmTypeVMSS = "vmss" vmTypeStandard = "standard" From 1a484809d81e304b03d7c63719e3e2d5a27fe5f0 Mon Sep 17 00:00:00 2001 From: Tim Hockin Date: Thu, 3 Jan 2019 10:23:20 -0800 Subject: [PATCH 57/96] kube-proxy: rename internal field for clarity --- pkg/proxy/iptables/proxier.go | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/pkg/proxy/iptables/proxier.go b/pkg/proxy/iptables/proxier.go index 41f03d49cde00..8d7777bb30de2 100644 --- a/pkg/proxy/iptables/proxier.go +++ b/pkg/proxy/iptables/proxier.go @@ -357,7 +357,7 @@ func NewProxier(ipt utiliptables.Interface, type iptablesJumpChain struct { table utiliptables.Table - chain utiliptables.Chain + dstChain utiliptables.Chain sourceChain utiliptables.Chain comment string extraArgs []string @@ -386,7 +386,7 @@ func CleanupLeftovers(ipt utiliptables.Interface) (encounteredError bool) { for _, chain := range append(iptablesJumpChains, iptablesCleanupOnlyChains...) { args := append(chain.extraArgs, "-m", "comment", "--comment", chain.comment, - "-j", string(chain.chain), + "-j", string(chain.dstChain), ) if err := ipt.DeleteRule(chain.table, chain.sourceChain, args...); err != nil { if !utiliptables.IsNotFoundError(err) { @@ -663,16 +663,16 @@ func (proxier *Proxier) syncProxyRules() { // Create and link the kube chains. 
for _, chain := range iptablesJumpChains { - if _, err := proxier.iptables.EnsureChain(chain.table, chain.chain); err != nil { + if _, err := proxier.iptables.EnsureChain(chain.table, chain.dstChain); err != nil { glog.Errorf("Failed to ensure that %s chain %s exists: %v", chain.table, kubeServicesChain, err) return } args := append(chain.extraArgs, "-m", "comment", "--comment", chain.comment, - "-j", string(chain.chain), + "-j", string(chain.dstChain), ) if _, err := proxier.iptables.EnsureRule(utiliptables.Prepend, chain.table, chain.sourceChain, args...); err != nil { - glog.Errorf("Failed to ensure that %s chain %s jumps to %s: %v", chain.table, chain.sourceChain, chain.chain, err) + glog.Errorf("Failed to ensure that %s chain %s jumps to %s: %v", chain.table, chain.sourceChain, chain.dstChain, err) return } } From ae1e6fd453e07eece672cd738364a9b8318a7eaa Mon Sep 17 00:00:00 2001 From: Tim Hockin Date: Thu, 3 Jan 2019 10:25:55 -0800 Subject: [PATCH 58/96] kube-proxy: rename vars for clarity, fix err str --- pkg/proxy/iptables/proxier.go | 26 +++++++++++++------------- 1 file changed, 13 insertions(+), 13 deletions(-) diff --git a/pkg/proxy/iptables/proxier.go b/pkg/proxy/iptables/proxier.go index 8d7777bb30de2..c8a13fc26b01a 100644 --- a/pkg/proxy/iptables/proxier.go +++ b/pkg/proxy/iptables/proxier.go @@ -383,12 +383,12 @@ var iptablesCleanupOnlyChains = []iptablesJumpChain{ // It returns true if an error was encountered. Errors are logged. func CleanupLeftovers(ipt utiliptables.Interface) (encounteredError bool) { // Unlink our chains - for _, chain := range append(iptablesJumpChains, iptablesCleanupOnlyChains...) { - args := append(chain.extraArgs, - "-m", "comment", "--comment", chain.comment, - "-j", string(chain.dstChain), + for _, jump := range append(iptablesJumpChains, iptablesCleanupOnlyChains...) 
{ + args := append(jump.extraArgs, + "-m", "comment", "--comment", jump.comment, + "-j", string(jump.dstChain), ) - if err := ipt.DeleteRule(chain.table, chain.sourceChain, args...); err != nil { + if err := ipt.DeleteRule(jump.table, jump.sourceChain, args...); err != nil { if !utiliptables.IsNotFoundError(err) { glog.Errorf("Error removing pure-iptables proxy rule: %v", err) encounteredError = true @@ -662,17 +662,17 @@ func (proxier *Proxier) syncProxyRules() { glog.V(3).Infof("Syncing iptables rules") // Create and link the kube chains. - for _, chain := range iptablesJumpChains { - if _, err := proxier.iptables.EnsureChain(chain.table, chain.dstChain); err != nil { - glog.Errorf("Failed to ensure that %s chain %s exists: %v", chain.table, kubeServicesChain, err) + for _, jump := range iptablesJumpChains { + if _, err := proxier.iptables.EnsureChain(jump.table, jump.dstChain); err != nil { + glog.Errorf("Failed to ensure that %s chain %s exists: %v", jump.table, jump.dstChain, err) return } - args := append(chain.extraArgs, - "-m", "comment", "--comment", chain.comment, - "-j", string(chain.dstChain), + args := append(jump.extraArgs, + "-m", "comment", "--comment", jump.comment, + "-j", string(jump.dstChain), ) - if _, err := proxier.iptables.EnsureRule(utiliptables.Prepend, chain.table, chain.sourceChain, args...); err != nil { - glog.Errorf("Failed to ensure that %s chain %s jumps to %s: %v", chain.table, chain.sourceChain, chain.dstChain, err) + if _, err := proxier.iptables.EnsureRule(utiliptables.Prepend, jump.table, jump.sourceChain, args...); err != nil { + glog.Errorf("Failed to ensure that %s chain %s jumps to %s: %v", jump.table, jump.sourceChain, jump.dstChain, err) return } } From a3749f0830423b638d78f3236f4feaa27f310284 Mon Sep 17 00:00:00 2001 From: Tim Hockin Date: Thu, 3 Jan 2019 10:26:51 -0800 Subject: [PATCH 59/96] kube-proxy: rename field for congruence --- pkg/proxy/iptables/proxier.go | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 
deletions(-) diff --git a/pkg/proxy/iptables/proxier.go b/pkg/proxy/iptables/proxier.go index c8a13fc26b01a..39b134c19b708 100644 --- a/pkg/proxy/iptables/proxier.go +++ b/pkg/proxy/iptables/proxier.go @@ -356,11 +356,11 @@ func NewProxier(ipt utiliptables.Interface, } type iptablesJumpChain struct { - table utiliptables.Table - dstChain utiliptables.Chain - sourceChain utiliptables.Chain - comment string - extraArgs []string + table utiliptables.Table + dstChain utiliptables.Chain + srcChain utiliptables.Chain + comment string + extraArgs []string } var iptablesJumpChains = []iptablesJumpChain{ @@ -388,7 +388,7 @@ func CleanupLeftovers(ipt utiliptables.Interface) (encounteredError bool) { "-m", "comment", "--comment", jump.comment, "-j", string(jump.dstChain), ) - if err := ipt.DeleteRule(jump.table, jump.sourceChain, args...); err != nil { + if err := ipt.DeleteRule(jump.table, jump.srcChain, args...); err != nil { if !utiliptables.IsNotFoundError(err) { glog.Errorf("Error removing pure-iptables proxy rule: %v", err) encounteredError = true @@ -671,8 +671,8 @@ func (proxier *Proxier) syncProxyRules() { "-m", "comment", "--comment", jump.comment, "-j", string(jump.dstChain), ) - if _, err := proxier.iptables.EnsureRule(utiliptables.Prepend, jump.table, jump.sourceChain, args...); err != nil { - glog.Errorf("Failed to ensure that %s chain %s jumps to %s: %v", jump.table, jump.sourceChain, jump.dstChain, err) + if _, err := proxier.iptables.EnsureRule(utiliptables.Prepend, jump.table, jump.srcChain, args...); err != nil { + glog.Errorf("Failed to ensure that %s chain %s jumps to %s: %v", jump.table, jump.srcChain, jump.dstChain, err) return } } From cd5c0459e9645365bd891683761fc920679666f7 Mon Sep 17 00:00:00 2001 From: Tim Hockin Date: Thu, 3 Jan 2019 10:35:43 -0800 Subject: [PATCH 60/96] kube-proxy: remove old cleanup rules --- pkg/proxy/iptables/proxier.go | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/pkg/proxy/iptables/proxier.go 
b/pkg/proxy/iptables/proxier.go index 39b134c19b708..1508086cece74 100644 --- a/pkg/proxy/iptables/proxier.go +++ b/pkg/proxy/iptables/proxier.go @@ -372,12 +372,7 @@ var iptablesJumpChains = []iptablesJumpChain{ {utiliptables.TableFilter, kubeForwardChain, utiliptables.ChainForward, "kubernetes forwarding rules", nil}, } -var iptablesCleanupOnlyChains = []iptablesJumpChain{ - // Present in kube 1.6 - 1.9. Removed by #56164 in favor of kubeExternalServicesChain - {utiliptables.TableFilter, kubeServicesChain, utiliptables.ChainInput, "kubernetes service portals", nil}, - // Present in kube <= 1.9. Removed by #60306 in favor of rule with extraArgs - {utiliptables.TableFilter, kubeServicesChain, utiliptables.ChainOutput, "kubernetes service portals", nil}, -} +var iptablesCleanupOnlyChains = []iptablesJumpChain{} // CleanupLeftovers removes all iptables rules and chains created by the Proxier // It returns true if an error was encountered. Errors are logged. From 685804c937132084a553548d42c54e83052794c3 Mon Sep 17 00:00:00 2001 From: Tim Hockin Date: Thu, 3 Jan 2019 10:33:11 -0800 Subject: [PATCH 61/96] kube-proxy: reject 0 endpoints on forward Previously we only REJECTed on OUTPUT which works for packets from the node but not for packets from pods on the node. 
--- pkg/proxy/iptables/proxier.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/pkg/proxy/iptables/proxier.go b/pkg/proxy/iptables/proxier.go index 1508086cece74..fe35b1a8984cb 100644 --- a/pkg/proxy/iptables/proxier.go +++ b/pkg/proxy/iptables/proxier.go @@ -365,11 +365,12 @@ type iptablesJumpChain struct { var iptablesJumpChains = []iptablesJumpChain{ {utiliptables.TableFilter, kubeExternalServicesChain, utiliptables.ChainInput, "kubernetes externally-visible service portals", []string{"-m", "conntrack", "--ctstate", "NEW"}}, + {utiliptables.TableFilter, kubeServicesChain, utiliptables.ChainForward, "kubernetes service portals", []string{"-m", "conntrack", "--ctstate", "NEW"}}, {utiliptables.TableFilter, kubeServicesChain, utiliptables.ChainOutput, "kubernetes service portals", []string{"-m", "conntrack", "--ctstate", "NEW"}}, + {utiliptables.TableFilter, kubeForwardChain, utiliptables.ChainForward, "kubernetes forwarding rules", nil}, {utiliptables.TableNAT, kubeServicesChain, utiliptables.ChainOutput, "kubernetes service portals", nil}, {utiliptables.TableNAT, kubeServicesChain, utiliptables.ChainPrerouting, "kubernetes service portals", nil}, {utiliptables.TableNAT, kubePostroutingChain, utiliptables.ChainPostrouting, "kubernetes postrouting rules", nil}, - {utiliptables.TableFilter, kubeForwardChain, utiliptables.ChainForward, "kubernetes forwarding rules", nil}, } var iptablesCleanupOnlyChains = []iptablesJumpChain{} From e53e0bb8b05696e20e5b2f7a8490cd3829e36522 Mon Sep 17 00:00:00 2001 From: Tim Hockin Date: Thu, 7 Mar 2019 17:14:18 -0800 Subject: [PATCH 62/96] Fix small race in e2e Occasionally we get spurious errors about "no route to host" when we race with kube-proxy. This should reduce that. It's mostly just log noise. 
--- test/e2e/framework/service_util.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/test/e2e/framework/service_util.go b/test/e2e/framework/service_util.go index 01b6e90201dc1..71245f1fc95ec 100644 --- a/test/e2e/framework/service_util.go +++ b/test/e2e/framework/service_util.go @@ -598,6 +598,7 @@ func (j *ServiceTestJig) waitForConditionOrFail(namespace, name string, timeout // name as the jig and runs the "netexec" container. func (j *ServiceTestJig) newRCTemplate(namespace string) *v1.ReplicationController { var replicas int32 = 1 + var grace int64 = 3 // so we don't race with kube-proxy when scaling up/down rc := &v1.ReplicationController{ ObjectMeta: metav1.ObjectMeta{ @@ -629,7 +630,7 @@ func (j *ServiceTestJig) newRCTemplate(namespace string) *v1.ReplicationControll }, }, }, - TerminationGracePeriodSeconds: new(int64), + TerminationGracePeriodSeconds: &grace, }, }, }, From 6b57e925945840414d256b9048489de6b15de179 Mon Sep 17 00:00:00 2001 From: Tim Hockin Date: Thu, 7 Mar 2019 17:08:44 -0800 Subject: [PATCH 63/96] Retool HTTP and UDP e2e utils This is a prefactoring for followup changes that need to use very similar but subtly different test. Now it is more generic, though it pushes a little logic up the stack. That makes sense to me. --- test/e2e/framework/networking_utils.go | 354 +++++++++++++++---------- test/e2e/framework/service_util.go | 68 +++-- test/e2e/network/firewall.go | 11 + test/e2e/network/service.go | 20 +- 4 files changed, 283 insertions(+), 170 deletions(-) diff --git a/test/e2e/framework/networking_utils.go b/test/e2e/framework/networking_utils.go index 0ce339842a7e8..228e71fd6e291 100644 --- a/test/e2e/framework/networking_utils.go +++ b/test/e2e/framework/networking_utils.go @@ -17,7 +17,6 @@ limitations under the License. 
package framework import ( - "bytes" "encoding/json" "fmt" "io/ioutil" @@ -708,197 +707,263 @@ func CheckReachabilityFromPod(expectToBeReachable bool, timeout time.Duration, n Expect(err).NotTo(HaveOccurred()) } -// Does an HTTP GET, but does not reuse TCP connections -// This masks problems where the iptables rule has changed, but we don't see it -// This is intended for relatively quick requests (status checks), so we set a short (5 seconds) timeout -func httpGetNoConnectionPool(url string) (*http.Response, error) { - return httpGetNoConnectionPoolTimeout(url, 5*time.Second) -} - -func httpGetNoConnectionPoolTimeout(url string, timeout time.Duration) (*http.Response, error) { - tr := utilnet.SetTransportDefaults(&http.Transport{ - DisableKeepAlives: true, - }) - client := &http.Client{ - Transport: tr, - Timeout: timeout, - } - - return client.Get(url) -} - -func TestReachableHTTP(ip string, port int, request string, expect string) (bool, error) { - return TestReachableHTTPWithContent(ip, port, request, expect, nil) -} - -func TestReachableHTTPWithRetriableErrorCodes(ip string, port int, request string, expect string, retriableErrCodes []int) (bool, error) { - return TestReachableHTTPWithContentTimeoutWithRetriableErrorCodes(ip, port, request, expect, nil, retriableErrCodes, time.Second*5) +type HTTPPokeParams struct { + Timeout time.Duration + ExpectCode int // default = 200 + BodyContains string + RetriableCodes []int } -func TestReachableHTTPWithContent(ip string, port int, request string, expect string, content *bytes.Buffer) (bool, error) { - return TestReachableHTTPWithContentTimeout(ip, port, request, expect, content, 5*time.Second) +type HTTPPokeResult struct { + Status HTTPPokeStatus + Code int // HTTP code: 0 if the connection was not made + Error error // if there was any error + Body []byte // if code != 0 } -func TestReachableHTTPWithContentTimeout(ip string, port int, request string, expect string, content *bytes.Buffer, timeout time.Duration) 
(bool, error) { - return TestReachableHTTPWithContentTimeoutWithRetriableErrorCodes(ip, port, request, expect, content, []int{}, timeout) -} +type HTTPPokeStatus string -func TestReachableHTTPWithContentTimeoutWithRetriableErrorCodes(ip string, port int, request string, expect string, content *bytes.Buffer, retriableErrCodes []int, timeout time.Duration) (bool, error) { +const ( + HTTPSuccess HTTPPokeStatus = "Success" + HTTPError HTTPPokeStatus = "UnknownError" + // Any time we add new errors, we should audit all callers of this. + HTTPTimeout HTTPPokeStatus = "TimedOut" + HTTPRefused HTTPPokeStatus = "ConnectionRefused" + HTTPRetryCode HTTPPokeStatus = "RetryCode" + HTTPWrongCode HTTPPokeStatus = "WrongCode" + HTTPBadResponse HTTPPokeStatus = "BadResponse" +) - ipPort := net.JoinHostPort(ip, strconv.Itoa(port)) - url := fmt.Sprintf("http://%s%s", ipPort, request) - if ip == "" { - Failf("Got empty IP for reachability check (%s)", url) - return false, nil +// PokeHTTP tries to connect to a host on a port for a given URL path. Callers +// can specify additional success parameters, if desired. +// +// The result status will be characterized as precisely as possible, given the +// known users of this. +// +// The result code will be zero in case of any failure to connect, or non-zero +// if the HTTP transaction completed (even if the other test params make this a +// failure). +// +// The result error will be populated for any status other than Success. +// +// The result body will be populated if the HTTP transaction was completed, even +// if the other test params make this a failure). +func PokeHTTP(host string, port int, path string, params *HTTPPokeParams) HTTPPokeResult { + hostPort := net.JoinHostPort(host, strconv.Itoa(port)) + url := fmt.Sprintf("http://%s%s", hostPort, path) + + ret := HTTPPokeResult{} + + // Sanity check inputs, because it has happened. These are the only things + // that should hard fail the test - they are basically ASSERT()s. 
+ if host == "" { + Failf("Got empty host for HTTP poke (%s)", url) + return ret } if port == 0 { - Failf("Got port==0 for reachability check (%s)", url) - return false, nil + Failf("Got port==0 for HTTP poke (%s)", url) + return ret + } + + // Set default params. + if params == nil { + params = &HTTPPokeParams{} + } + if params.ExpectCode == 0 { + params.ExpectCode = http.StatusOK } - Logf("Testing HTTP reachability of %v", url) + Logf("Poking %q", url) - resp, err := httpGetNoConnectionPoolTimeout(url, timeout) + resp, err := httpGetNoConnectionPoolTimeout(url, params.Timeout) if err != nil { - Logf("Got error testing for reachability of %s: %v", url, err) - return false, nil + ret.Error = err + neterr, ok := err.(net.Error) + if ok && neterr.Timeout() { + ret.Status = HTTPTimeout + } else if strings.Contains(err.Error(), "connection refused") { + ret.Status = HTTPRefused + } else { + ret.Status = HTTPError + } + Logf("Poke(%q): %v", url, err) + return ret } + + ret.Code = resp.StatusCode + defer resp.Body.Close() body, err := ioutil.ReadAll(resp.Body) if err != nil { - Logf("Got error reading response from %s: %v", url, err) - return false, nil + ret.Status = HTTPError + ret.Error = fmt.Errorf("error reading HTTP body: %v", err) + Logf("Poke(%q): %v", url, ret.Error) + return ret } - if resp.StatusCode != 200 { - for _, code := range retriableErrCodes { + ret.Body = make([]byte, len(body)) + copy(ret.Body, body) + + if resp.StatusCode != params.ExpectCode { + for _, code := range params.RetriableCodes { if resp.StatusCode == code { - Logf("Got non-success status %q when trying to access %s, but the error code is retriable", resp.Status, url) - return false, nil + ret.Error = fmt.Errorf("retriable status code: %d", resp.StatusCode) + ret.Status = HTTPRetryCode + Logf("Poke(%q): %v", url, ret.Error) + return ret } } - return false, fmt.Errorf("received non-success return status %q trying to access %s; got body: %s", - resp.Status, url, string(body)) + ret.Status = 
HTTPWrongCode + ret.Error = fmt.Errorf("bad status code: %d", resp.StatusCode) + Logf("Poke(%q): %v", url, ret.Error) + return ret } - if !strings.Contains(string(body), expect) { - return false, fmt.Errorf("received response body without expected substring %q: %s", expect, string(body)) - } - if content != nil { - content.Write(body) - } - return true, nil -} -func TestNotReachableHTTP(ip string, port int) (bool, error) { - return TestNotReachableHTTPTimeout(ip, port, 5*time.Second) -} - -func TestNotReachableHTTPTimeout(ip string, port int, timeout time.Duration) (bool, error) { - ipPort := net.JoinHostPort(ip, strconv.Itoa(port)) - url := fmt.Sprintf("http://%s", ipPort) - if ip == "" { - Failf("Got empty IP for non-reachability check (%s)", url) - return false, nil - } - if port == 0 { - Failf("Got port==0 for non-reachability check (%s)", url) - return false, nil + if params.BodyContains != "" && !strings.Contains(string(body), params.BodyContains) { + ret.Status = HTTPBadResponse + ret.Error = fmt.Errorf("response does not contain expected substring: %q", string(body)) + Logf("Poke(%q): %v", url, ret.Error) + return ret } - Logf("Testing HTTP non-reachability of %v", url) - - resp, err := httpGetNoConnectionPoolTimeout(url, timeout) - if err != nil { - Logf("Confirmed that %s is not reachable", url) - return true, nil - } - resp.Body.Close() - return false, nil + ret.Status = HTTPSuccess + Logf("Poke(%q): success", url) + return ret } -func TestReachableUDP(ip string, port int, request string, expect string) (bool, error) { - ipPort := net.JoinHostPort(ip, strconv.Itoa(port)) - uri := fmt.Sprintf("udp://%s", ipPort) - if ip == "" { - Failf("Got empty IP for reachability check (%s)", uri) - return false, nil - } - if port == 0 { - Failf("Got port==0 for reachability check (%s)", uri) - return false, nil +// Does an HTTP GET, but does not reuse TCP connections +// This masks problems where the iptables rule has changed, but we don't see it +func 
httpGetNoConnectionPoolTimeout(url string, timeout time.Duration) (*http.Response, error) { + tr := utilnet.SetTransportDefaults(&http.Transport{ + DisableKeepAlives: true, + }) + client := &http.Client{ + Transport: tr, + Timeout: timeout, } - Logf("Testing UDP reachability of %v", uri) + return client.Get(url) +} - con, err := net.Dial("udp", ipPort) - if err != nil { - return false, fmt.Errorf("Failed to dial %s: %v", ipPort, err) - } +type UDPPokeParams struct { + Timeout time.Duration + Response string +} - _, err = con.Write([]byte(fmt.Sprintf("%s\n", request))) - if err != nil { - return false, fmt.Errorf("Failed to send request: %v", err) - } +type UDPPokeResult struct { + Status UDPPokeStatus + Error error // if there was any error + Response []byte // if code != 0 +} - var buf []byte = make([]byte, len(expect)+1) +type UDPPokeStatus string - err = con.SetDeadline(time.Now().Add(3 * time.Second)) - if err != nil { - return false, fmt.Errorf("Failed to set deadline: %v", err) - } +const ( + UDPSuccess UDPPokeStatus = "Success" + UDPError UDPPokeStatus = "UnknownError" + // Any time we add new errors, we should audit all callers of this. + UDPTimeout UDPPokeStatus = "TimedOut" + UDPRefused UDPPokeStatus = "ConnectionRefused" + UDPBadResponse UDPPokeStatus = "BadResponse" +) - _, err = con.Read(buf) - if err != nil { - return false, nil +// PokeUDP tries to connect to a host on a port and send the given request. Callers +// can specify additional success parameters, if desired. +// +// The result status will be characterized as precisely as possible, given the +// known users of this. +// +// The result error will be populated for any status other than Success. +// +// The result response will be populated if the UDP transaction was completed, even +// if the other test params make this a failure). 
+func PokeUDP(host string, port int, request string, params *UDPPokeParams) UDPPokeResult { + hostPort := net.JoinHostPort(host, strconv.Itoa(port)) + url := fmt.Sprintf("udp://%s", hostPort) + + ret := UDPPokeResult{} + + // Sanity check inputs, because it has happened. These are the only things + // that should hard fail the test - they are basically ASSERT()s. + if host == "" { + Failf("Got empty host for UDP poke (%s)", url) + return ret } - - if !strings.Contains(string(buf), expect) { - return false, fmt.Errorf("Failed to retrieve %q, got %q", expect, string(buf)) + if port == 0 { + Failf("Got port==0 for UDP poke (%s)", url) + return ret } - Logf("Successfully reached %v", uri) - return true, nil -} - -func TestNotReachableUDP(ip string, port int, request string) (bool, error) { - ipPort := net.JoinHostPort(ip, strconv.Itoa(port)) - uri := fmt.Sprintf("udp://%s", ipPort) - if ip == "" { - Failf("Got empty IP for reachability check (%s)", uri) - return false, nil - } - if port == 0 { - Failf("Got port==0 for reachability check (%s)", uri) - return false, nil + // Set default params. 
+ if params == nil { + params = &UDPPokeParams{} } - Logf("Testing UDP non-reachability of %v", uri) + Logf("Poking %v", url) - con, err := net.Dial("udp", ipPort) + con, err := net.Dial("udp", hostPort) if err != nil { - Logf("Confirmed that %s is not reachable", uri) - return true, nil + ret.Status = UDPError + ret.Error = err + Logf("Poke(%q): %v", url, err) + return ret } _, err = con.Write([]byte(fmt.Sprintf("%s\n", request))) if err != nil { - Logf("Confirmed that %s is not reachable", uri) - return true, nil + ret.Error = err + neterr, ok := err.(net.Error) + if ok && neterr.Timeout() { + ret.Status = UDPTimeout + } else if strings.Contains(err.Error(), "connection refused") { + ret.Status = UDPRefused + } else { + ret.Status = UDPError + } + Logf("Poke(%q): %v", url, err) + return ret } - var buf []byte = make([]byte, 1) + if params.Timeout != 0 { + err = con.SetDeadline(time.Now().Add(params.Timeout)) + if err != nil { + ret.Status = UDPError + ret.Error = err + Logf("Poke(%q): %v", url, err) + return ret + } + } - err = con.SetDeadline(time.Now().Add(3 * time.Second)) + bufsize := len(params.Response) + 1 + if bufsize == 0 { + bufsize = 4096 + } + var buf []byte = make([]byte, bufsize) + n, err := con.Read(buf) if err != nil { - return false, fmt.Errorf("Failed to set deadline: %v", err) + ret.Error = err + neterr, ok := err.(net.Error) + if ok && neterr.Timeout() { + ret.Status = UDPTimeout + } else if strings.Contains(err.Error(), "connection refused") { + ret.Status = UDPRefused + } else { + ret.Status = UDPError + } + Logf("Poke(%q): %v", url, err) + return ret } + ret.Response = buf[0:n] - _, err = con.Read(buf) - if err != nil { - Logf("Confirmed that %s is not reachable", uri) - return true, nil + if params.Response != "" && string(ret.Response) != params.Response { + ret.Status = UDPBadResponse + ret.Error = fmt.Errorf("response does not match expected string: %q", string(ret.Response)) + Logf("Poke(%q): %v", url, ret.Error) + return ret } - 
return false, nil + ret.Status = UDPSuccess + Logf("Poke(%q): success", url) + return ret } func TestHitNodesFromOutside(externalIP string, httpPort int32, timeout time.Duration, expectedHosts sets.String) error { @@ -911,13 +976,12 @@ func TestHitNodesFromOutsideWithCount(externalIP string, httpPort int32, timeout hittedHosts := sets.NewString() count := 0 condition := func() (bool, error) { - var respBody bytes.Buffer - reached, err := TestReachableHTTPWithContentTimeout(externalIP, int(httpPort), "/hostname", "", &respBody, - 1*time.Second) - if err != nil || !reached { + result := PokeHTTP(externalIP, int(httpPort), "/hostname", &HTTPPokeParams{Timeout: 1 * time.Second}) + if result.Status != HTTPSuccess { return false, nil } - hittedHost := strings.TrimSpace(respBody.String()) + + hittedHost := strings.TrimSpace(string(result.Body)) if !expectedHosts.Has(hittedHost) { Logf("Error hitting unexpected host: %v, reset counter: %v", hittedHost, count) count = 0 diff --git a/test/e2e/framework/service_util.go b/test/e2e/framework/service_util.go index 71245f1fc95ec..2e6b302ea06eb 100644 --- a/test/e2e/framework/service_util.go +++ b/test/e2e/framework/service_util.go @@ -851,9 +851,19 @@ func (j *ServiceTestJig) TestReachableHTTP(host string, port int, timeout time.D } func (j *ServiceTestJig) TestReachableHTTPWithRetriableErrorCodes(host string, port int, retriableErrCodes []int, timeout time.Duration) { - if err := wait.PollImmediate(Poll, timeout, func() (bool, error) { - return TestReachableHTTPWithRetriableErrorCodes(host, port, "/echo?msg=hello", "hello", retriableErrCodes) - }); err != nil { + pollfn := func() (bool, error) { + result := PokeHTTP(host, port, "/echo?msg=hello", + &HTTPPokeParams{ + BodyContains: "hello", + RetriableCodes: retriableErrCodes, + }) + if result.Status == HTTPSuccess { + return true, nil + } + return false, nil // caller can retry + } + + if err := wait.PollImmediate(Poll, timeout, pollfn); err != nil { if err == 
wait.ErrWaitTimeout { Failf("Could not reach HTTP service through %v:%v after %v", host, port, timeout) } else { @@ -863,36 +873,60 @@ func (j *ServiceTestJig) TestReachableHTTPWithRetriableErrorCodes(host string, p } func (j *ServiceTestJig) TestNotReachableHTTP(host string, port int, timeout time.Duration) { - if err := wait.PollImmediate(Poll, timeout, func() (bool, error) { return TestNotReachableHTTP(host, port) }); err != nil { - Failf("Could still reach HTTP service through %v:%v after %v: %v", host, port, timeout, err) + pollfn := func() (bool, error) { + result := PokeHTTP(host, port, "/", nil) + if result.Code == 0 { + return true, nil + } + return false, nil // caller can retry + } + + if err := wait.PollImmediate(Poll, timeout, pollfn); err != nil { + Failf("HTTP service %v:%v reachable after %v: %v", host, port, timeout, err) } } func (j *ServiceTestJig) TestReachableUDP(host string, port int, timeout time.Duration) { - if err := wait.PollImmediate(Poll, timeout, func() (bool, error) { return TestReachableUDP(host, port, "echo hello", "hello") }); err != nil { + pollfn := func() (bool, error) { + result := PokeUDP(host, port, "echo hello", &UDPPokeParams{ + Timeout: 3 * time.Second, + Response: "hello", + }) + if result.Status == UDPSuccess { + return true, nil + } + return false, nil // caller can retry + } + + if err := wait.PollImmediate(Poll, timeout, pollfn); err != nil { Failf("Could not reach UDP service through %v:%v after %v: %v", host, port, timeout, err) } } func (j *ServiceTestJig) TestNotReachableUDP(host string, port int, timeout time.Duration) { - if err := wait.PollImmediate(Poll, timeout, func() (bool, error) { return TestNotReachableUDP(host, port, "echo hello") }); err != nil { - Failf("Could still reach UDP service through %v:%v after %v: %v", host, port, timeout, err) + pollfn := func() (bool, error) { + result := PokeUDP(host, port, "echo hello", &UDPPokeParams{Timeout: 3 * time.Second}) + if result.Status != UDPSuccess && 
result.Status != UDPError { + return true, nil + } + return false, nil // caller can retry + } + if err := wait.PollImmediate(Poll, timeout, pollfn); err != nil { + Failf("UDP service %v:%v reachable after %v: %v", host, port, timeout, err) } } func (j *ServiceTestJig) GetHTTPContent(host string, port int, timeout time.Duration, url string) bytes.Buffer { var body bytes.Buffer - var err error if pollErr := wait.PollImmediate(Poll, timeout, func() (bool, error) { - var result bool - result, err = TestReachableHTTPWithContent(host, port, url, "", &body) - if err != nil { - Logf("Error hitting %v:%v%v, retrying: %v", host, port, url, err) - return false, nil + result := PokeHTTP(host, port, url, nil) + if result.Status == HTTPSuccess { + body.Write(result.Body) + return true, nil } - return result, nil + return false, nil }); pollErr != nil { - Failf("Could not reach HTTP service through %v:%v%v after %v: %v", host, port, url, timeout, err) + Failf("Could not reach HTTP service through %v:%v%v after %v: %v", host, port, url, timeout, pollErr) } return body } @@ -905,7 +939,7 @@ func testHTTPHealthCheckNodePort(ip string, port int, request string) (bool, err return false, fmt.Errorf("Invalid input ip or port") } Logf("Testing HTTP health check on %v", url) - resp, err := httpGetNoConnectionPool(url) + resp, err := httpGetNoConnectionPoolTimeout(url, 5*time.Second) if err != nil { Logf("Got error testing for reachability of %s: %v", url, err) return false, err diff --git a/test/e2e/network/firewall.go b/test/e2e/network/firewall.go index f665ff3c6d128..c4f24dd4895b5 100644 --- a/test/e2e/network/firewall.go +++ b/test/e2e/network/firewall.go @@ -18,6 +18,7 @@ package network import ( "fmt" + "time" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/util/sets" @@ -182,3 +183,13 @@ var _ = SIGDescribe("Firewall rule", func() { Expect(flag).To(BeTrue()) }) }) + +func assertNotReachableHTTPTimeout(ip string, port int, timeout time.Duration) { + result := framework.PokeHTTP(ip, 
port, "/", &framework.HTTPPokeParams{Timeout: timeout}) + if result.Status == framework.HTTPError { + framework.Failf("Unexpected error checking for reachability of %s:%d: %v", ip, port, result.Error) + } + if result.Code != 0 { + framework.Failf("Was unexpectedly able to reach %s:%d", ip, port) + } +} diff --git a/test/e2e/network/service.go b/test/e2e/network/service.go index 441a00bc59e6d..34178fbc1e18f 100644 --- a/test/e2e/network/service.go +++ b/test/e2e/network/service.go @@ -1931,14 +1931,18 @@ var _ = SIGDescribe("ESIPP [Slow] [DisabledForLargeClusters]", func() { for nodeName, nodeIPs := range endpointNodeMap { By(fmt.Sprintf("checking kube-proxy health check fails on node with endpoint (%s), public IP %s", nodeName, nodeIPs[0])) var body bytes.Buffer - var result bool - var err error - if pollErr := wait.PollImmediate(framework.Poll, framework.ServiceTestTimeout, func() (bool, error) { - result, err = framework.TestReachableHTTPWithContent(nodeIPs[0], healthCheckNodePort, "/healthz", "", &body) - return !result, nil - }); pollErr != nil { - framework.Failf("Kube-proxy still exposing health check on node %v:%v, after ESIPP was turned off. Last err %v, last body %v", - nodeName, healthCheckNodePort, err, body.String()) + pollfn := func() (bool, error) { + result := framework.PokeHTTP(nodeIPs[0], healthCheckNodePort, "/healthz", nil) + if result.Code == 0 { + return true, nil + } + body.Reset() + body.Write(result.Body) + return false, nil + } + if pollErr := wait.PollImmediate(framework.Poll, framework.ServiceTestTimeout, pollfn); pollErr != nil { + framework.Failf("Kube-proxy still exposing health check on node %v:%v, after ESIPP was turned off. body %s", + nodeName, healthCheckNodePort, body.String()) } } From 928d62eac0f863fe78775359403c30f389dd2c8a Mon Sep 17 00:00:00 2001 From: Tim Hockin Date: Mon, 18 Feb 2019 23:52:24 -0800 Subject: [PATCH 64/96] Kube-proxy: REJECT LB IPs with no endpoints We REJECT every other case. Close this FIXME. 
To get this to work in all cases, we have to process service in filter.INPUT, since LB IPs might be managed as local addresses. --- pkg/proxy/iptables/proxier.go | 23 +++++++++--- test/e2e/framework/networking_utils.go | 23 ++++++++++++ test/e2e/framework/service_util.go | 49 ++++++++++++++++++++++++++ test/e2e/network/service.go | 40 +++++++++++++++++++-- 4 files changed, 128 insertions(+), 7 deletions(-) diff --git a/pkg/proxy/iptables/proxier.go b/pkg/proxy/iptables/proxier.go index fe35b1a8984cb..0920e8ace18b7 100644 --- a/pkg/proxy/iptables/proxier.go +++ b/pkg/proxy/iptables/proxier.go @@ -367,6 +367,7 @@ var iptablesJumpChains = []iptablesJumpChain{ {utiliptables.TableFilter, kubeExternalServicesChain, utiliptables.ChainInput, "kubernetes externally-visible service portals", []string{"-m", "conntrack", "--ctstate", "NEW"}}, {utiliptables.TableFilter, kubeServicesChain, utiliptables.ChainForward, "kubernetes service portals", []string{"-m", "conntrack", "--ctstate", "NEW"}}, {utiliptables.TableFilter, kubeServicesChain, utiliptables.ChainOutput, "kubernetes service portals", []string{"-m", "conntrack", "--ctstate", "NEW"}}, + {utiliptables.TableFilter, kubeServicesChain, utiliptables.ChainInput, "kubernetes service portals", []string{"-m", "conntrack", "--ctstate", "NEW"}}, {utiliptables.TableFilter, kubeForwardChain, utiliptables.ChainForward, "kubernetes forwarding rules", nil}, {utiliptables.TableNAT, kubeServicesChain, utiliptables.ChainOutput, "kubernetes service portals", nil}, {utiliptables.TableNAT, kubeServicesChain, utiliptables.ChainPrerouting, "kubernetes service portals", nil}, @@ -826,6 +827,7 @@ func (proxier *Proxier) syncProxyRules() { } writeLine(proxier.natRules, append(args, "-j", string(svcChain))...) } else { + // No endpoints.
writeLine(proxier.filterRules, "-A", string(kubeServicesChain), "-m", "comment", "--comment", fmt.Sprintf(`"%s has no endpoints"`, svcNameString), @@ -896,6 +898,7 @@ func (proxier *Proxier) syncProxyRules() { // This covers cases like GCE load-balancers which get added to the local routing table. writeLine(proxier.natRules, append(dstLocalOnlyArgs, "-j", string(svcChain))...) } else { + // No endpoints. writeLine(proxier.filterRules, "-A", string(kubeExternalServicesChain), "-m", "comment", "--comment", fmt.Sprintf(`"%s has no endpoints"`, svcNameString), @@ -908,10 +911,10 @@ func (proxier *Proxier) syncProxyRules() { } // Capture load-balancer ingress. - if hasEndpoints { - fwChain := svcInfo.serviceFirewallChainName - for _, ingress := range svcInfo.LoadBalancerStatus.Ingress { - if ingress.IP != "" { + fwChain := svcInfo.serviceFirewallChainName + for _, ingress := range svcInfo.LoadBalancerStatus.Ingress { + if ingress.IP != "" { + if hasEndpoints { // create service firewall chain if chain, ok := existingNATChains[fwChain]; ok { writeBytesLine(proxier.natChains, chain) @@ -972,10 +975,19 @@ func (proxier *Proxier) syncProxyRules() { // If the packet was able to reach the end of firewall chain, then it did not get DNATed. // It means the packet cannot go thru the firewall, then mark it for DROP writeLine(proxier.natRules, append(args, "-j", string(KubeMarkDropChain))...) + } else { + // No endpoints. + writeLine(proxier.filterRules, + "-A", string(kubeServicesChain), + "-m", "comment", "--comment", fmt.Sprintf(`"%s has no endpoints"`, svcNameString), + "-m", protocol, "-p", protocol, + "-d", utilproxy.ToCIDR(net.ParseIP(ingress.IP)), + "--dport", strconv.Itoa(svcInfo.Port), + "-j", "REJECT", + ) } } } - // FIXME: do we need REJECT rules for load-balancer ingress if !hasEndpoints? // Capture nodeports. 
If we had more than 2 rules it might be // worthwhile to make a new per-service chain for nodeport rules, but @@ -1057,6 +1069,7 @@ func (proxier *Proxier) syncProxyRules() { writeLine(proxier.natRules, append(args, "-j", string(svcXlbChain))...) } } else { + // No endpoints. writeLine(proxier.filterRules, "-A", string(kubeExternalServicesChain), "-m", "comment", "--comment", fmt.Sprintf(`"%s has no endpoints"`, svcNameString), diff --git a/test/e2e/framework/networking_utils.go b/test/e2e/framework/networking_utils.go index 228e71fd6e291..9acef9353584e 100644 --- a/test/e2e/framework/networking_utils.go +++ b/test/e2e/framework/networking_utils.go @@ -830,6 +830,29 @@ func PokeHTTP(host string, port int, path string, params *HTTPPokeParams) HTTPPo return ret } +func TestNotReachableHTTPTimeout(ip string, port int, timeout time.Duration) (bool, error) { + ipPort := net.JoinHostPort(ip, strconv.Itoa(port)) + url := fmt.Sprintf("http://%s", ipPort) + if ip == "" { + Failf("Got empty IP for non-reachability check (%s)", url) + return false, nil + } + if port == 0 { + Failf("Got port==0 for non-reachability check (%s)", url) + return false, nil + } + + Logf("Testing HTTP non-reachability of %v", url) + + resp, err := httpGetNoConnectionPoolTimeout(url, timeout) + if err != nil { + Logf("Confirmed that %s is not reachable", url) + return true, nil + } + resp.Body.Close() + return false, nil +} + // Does an HTTP GET, but does not reuse TCP connections // This masks problems where the iptables rule has changed, but we don't see it func httpGetNoConnectionPoolTimeout(url string, timeout time.Duration) (*http.Response, error) { diff --git a/test/e2e/framework/service_util.go b/test/e2e/framework/service_util.go index 2e6b302ea06eb..7ff873b6e8084 100644 --- a/test/e2e/framework/service_util.go +++ b/test/e2e/framework/service_util.go @@ -713,6 +713,28 @@ func (j *ServiceTestJig) RunOrFail(namespace string, tweak func(rc *v1.Replicati return result } +func (j *ServiceTestJig) 
Scale(namespace string, replicas int) { + rc := j.Name + scale, err := j.Client.CoreV1().ReplicationControllers(namespace).GetScale(rc, metav1.GetOptions{}) + if err != nil { + Failf("Failed to get scale for RC %q: %v", rc, err) + } + + scale.Spec.Replicas = int32(replicas) + _, err = j.Client.CoreV1().ReplicationControllers(namespace).UpdateScale(rc, scale) + if err != nil { + Failf("Failed to scale RC %q: %v", rc, err) + } + pods, err := j.waitForPodsCreated(namespace, replicas) + if err != nil { + Failf("Failed waiting for pods: %v", err) + } + if err := j.waitForPodsReady(namespace, pods); err != nil { + Failf("Failed waiting for pods to be running: %v", err) + } + return +} + func (j *ServiceTestJig) waitForPdbReady(namespace string) error { timeout := 2 * time.Minute for start := time.Now(); time.Since(start) < timeout; time.Sleep(2 * time.Second) { @@ -886,6 +908,20 @@ func (j *ServiceTestJig) TestNotReachableHTTP(host string, port int, timeout tim } } +func (j *ServiceTestJig) TestRejectedHTTP(host string, port int, timeout time.Duration) { + pollfn := func() (bool, error) { + result := PokeHTTP(host, port, "/", nil) + if result.Status == HTTPRefused { + return true, nil + } + return false, nil // caller can retry + } + + if err := wait.PollImmediate(Poll, timeout, pollfn); err != nil { + Failf("HTTP service %v:%v not rejected: %v", host, port, err) + } +} + func (j *ServiceTestJig) TestReachableUDP(host string, port int, timeout time.Duration) { pollfn := func() (bool, error) { result := PokeUDP(host, port, "echo hello", &UDPPokeParams{ @@ -916,6 +952,19 @@ func (j *ServiceTestJig) TestNotReachableUDP(host string, port int, timeout time } } +func (j *ServiceTestJig) TestRejectedUDP(host string, port int, timeout time.Duration) { + pollfn := func() (bool, error) { + result := PokeUDP(host, port, "echo hello", &UDPPokeParams{Timeout: 3 * time.Second}) + if result.Status == UDPRefused { + return true, nil + } + return false, nil // caller can retry + } + if 
err := wait.PollImmediate(Poll, timeout, pollfn); err != nil { + Failf("UDP service %v:%v not rejected: %v", host, port, err) + } +} + func (j *ServiceTestJig) GetHTTPContent(host string, port int, timeout time.Duration, url string) bytes.Buffer { var body bytes.Buffer if pollErr := wait.PollImmediate(Poll, timeout, func() (bool, error) { diff --git a/test/e2e/network/service.go b/test/e2e/network/service.go index 34178fbc1e18f..cf62ddc4eee53 100644 --- a/test/e2e/network/service.go +++ b/test/e2e/network/service.go @@ -789,11 +789,47 @@ var _ = SIGDescribe("Services", func() { jig.TestReachableUDP(nodeIP, udpNodePort, framework.KubeProxyLagTimeout) By("hitting the TCP service's LoadBalancer") - jig.TestReachableHTTP(tcpIngressIP, svcPort, loadBalancerCreateTimeout) // this may actually recreate the LB + jig.TestReachableHTTP(tcpIngressIP, svcPort, loadBalancerCreateTimeout) if loadBalancerSupportsUDP { By("hitting the UDP service's LoadBalancer") - jig.TestReachableUDP(udpIngressIP, svcPort, loadBalancerCreateTimeout) // this may actually recreate the LB) + jig.TestReachableUDP(udpIngressIP, svcPort, loadBalancerCreateTimeout) + } + + By("Scaling the pods to 0") + jig.Scale(ns1, 0) + jig.Scale(ns2, 0) + + By("looking for ICMP REJECT on the TCP service's NodePort") + jig.TestRejectedHTTP(nodeIP, tcpNodePort, framework.KubeProxyLagTimeout) + + By("looking for ICMP REJECT on the UDP service's NodePort") + jig.TestRejectedUDP(nodeIP, udpNodePort, framework.KubeProxyLagTimeout) + + By("looking for ICMP REJECT on the TCP service's LoadBalancer") + jig.TestRejectedHTTP(tcpIngressIP, svcPort, loadBalancerCreateTimeout) + + if loadBalancerSupportsUDP { + By("looking for ICMP REJECT on the UDP service's LoadBalancer") + jig.TestRejectedUDP(udpIngressIP, svcPort, loadBalancerCreateTimeout) + } + + By("Scaling the pods to 1") + jig.Scale(ns1, 1) + jig.Scale(ns2, 1) + + By("hitting the TCP service's NodePort") + jig.TestReachableHTTP(nodeIP, tcpNodePort, 
framework.KubeProxyLagTimeout) + + By("hitting the UDP service's NodePort") + jig.TestReachableUDP(nodeIP, udpNodePort, framework.KubeProxyLagTimeout) + + By("hitting the TCP service's LoadBalancer") + jig.TestReachableHTTP(tcpIngressIP, svcPort, loadBalancerCreateTimeout) + + if loadBalancerSupportsUDP { + By("hitting the UDP service's LoadBalancer") + jig.TestReachableUDP(udpIngressIP, svcPort, loadBalancerCreateTimeout) } // Change the services back to ClusterIP. From ceefd18ee60a3efbd3bdbe8ea9fd1c5b08cb3bd2 Mon Sep 17 00:00:00 2001 From: Pengfei Ni Date: Wed, 17 Apr 2019 11:42:29 +0800 Subject: [PATCH 65/96] Fix Azure SLB support for multiple backend pools Azure VM and vmssVM support multiple backend pools for the same SLB, but not for different LBs. --- .../providers/azure/azure_standard.go | 19 +++-- .../providers/azure/azure_vmss.go | 19 +++-- .../providers/azure/azure_wrap.go | 26 +++++++ .../providers/azure/azure_wrap_test.go | 75 +++++++++++++++++++ 4 files changed, 123 insertions(+), 16 deletions(-) diff --git a/pkg/cloudprovider/providers/azure/azure_standard.go b/pkg/cloudprovider/providers/azure/azure_standard.go index 56868e8b5a9fc..d408a6af18386 100644 --- a/pkg/cloudprovider/providers/azure/azure_standard.go +++ b/pkg/cloudprovider/providers/azure/azure_standard.go @@ -673,17 +673,20 @@ func (as *availabilitySet) ensureHostInPool(service *v1.Service, nodeName types. // sets, the same network interface couldn't be added to more than one load balancer of // the same type. Omit those nodes (e.g. masters) so Azure ARM won't complain // about this. 
+ newBackendPoolsIDs := make([]string, 0, len(newBackendPools)) for _, pool := range newBackendPools { - backendPool := *pool.ID - matches := backendPoolIDRE.FindStringSubmatch(backendPool) - if len(matches) == 2 { - lbName := matches[1] - if strings.HasSuffix(lbName, InternalLoadBalancerNameSuffix) == isInternal { - glog.V(4).Infof("Node %q has already been added to LB %q, omit adding it to a new one", nodeName, lbName) - return nil - } + if pool.ID != nil { + newBackendPoolsIDs = append(newBackendPoolsIDs, *pool.ID) } } + isSameLB, oldLBName, err := isBackendPoolOnSameLB(backendPoolID, newBackendPoolsIDs) + if err != nil { + return err + } + if !isSameLB { + glog.V(4).Infof("Node %q has already been added to LB %q, omit adding it to a new one", nodeName, oldLBName) + return nil + } } newBackendPools = append(newBackendPools, diff --git a/pkg/cloudprovider/providers/azure/azure_vmss.go b/pkg/cloudprovider/providers/azure/azure_vmss.go index 2c5ac451edaa8..b17bcaff7be36 100644 --- a/pkg/cloudprovider/providers/azure/azure_vmss.go +++ b/pkg/cloudprovider/providers/azure/azure_vmss.go @@ -729,17 +729,20 @@ func (ss *scaleSet) ensureHostsInVMSetPool(service *v1.Service, backendPoolID st // the same network interface couldn't be added to more than one load balancer of // the same type. Omit those nodes (e.g. masters) so Azure ARM won't complain // about this. 
+ newBackendPoolsIDs := make([]string, 0, len(newBackendPools)) for _, pool := range newBackendPools { - backendPool := *pool.ID - matches := backendPoolIDRE.FindStringSubmatch(backendPool) - if len(matches) == 2 { - lbName := matches[1] - if strings.HasSuffix(lbName, InternalLoadBalancerNameSuffix) == isInternal { - glog.V(4).Infof("vmss %q has already been added to LB %q, omit adding it to a new one", vmSetName, lbName) - return nil - } + if pool.ID != nil { + newBackendPoolsIDs = append(newBackendPoolsIDs, *pool.ID) } } + isSameLB, oldLBName, err := isBackendPoolOnSameLB(backendPoolID, newBackendPoolsIDs) + if err != nil { + return err + } + if !isSameLB { + glog.V(4).Infof("VMSS %q has already been added to LB %q, omit adding it to a new one", vmSetName, oldLBName) + return nil + } } newBackendPools = append(newBackendPools, diff --git a/pkg/cloudprovider/providers/azure/azure_wrap.go b/pkg/cloudprovider/providers/azure/azure_wrap.go index 2d27fad2689bd..f106dc610f42d 100644 --- a/pkg/cloudprovider/providers/azure/azure_wrap.go +++ b/pkg/cloudprovider/providers/azure/azure_wrap.go @@ -324,3 +324,29 @@ func (az *Cloud) IsNodeUnmanaged(nodeName string) (bool, error) { func (az *Cloud) IsNodeUnmanagedByProviderID(providerID string) bool { return !azureNodeProviderIDRE.Match([]byte(providerID)) } + +// isBackendPoolOnSameLB checks whether newBackendPoolID is on the same load balancer as existingBackendPools. +// Since both public and internal LBs are supported, lbName and lbName-internal are treated as same. +// If not same, the lbName for existingBackendPools would also be returned. 
+func isBackendPoolOnSameLB(newBackendPoolID string, existingBackendPools []string) (bool, string, error) { + matches := backendPoolIDRE.FindStringSubmatch(newBackendPoolID) + if len(matches) != 2 { + return false, "", fmt.Errorf("new backendPoolID %q is in wrong format", newBackendPoolID) + } + + newLBName := matches[1] + newLBNameTrimmed := strings.TrimRight(newLBName, InternalLoadBalancerNameSuffix) + for _, backendPool := range existingBackendPools { + matches := backendPoolIDRE.FindStringSubmatch(backendPool) + if len(matches) != 2 { + return false, "", fmt.Errorf("existing backendPoolID %q is in wrong format", backendPool) + } + + lbName := matches[1] + if !strings.EqualFold(strings.TrimRight(lbName, InternalLoadBalancerNameSuffix), newLBNameTrimmed) { + return false, lbName, nil + } + } + + return true, "", nil +} diff --git a/pkg/cloudprovider/providers/azure/azure_wrap_test.go b/pkg/cloudprovider/providers/azure/azure_wrap_test.go index 3ac2bfc6b5535..d2ab2ea3ff3ae 100644 --- a/pkg/cloudprovider/providers/azure/azure_wrap_test.go +++ b/pkg/cloudprovider/providers/azure/azure_wrap_test.go @@ -142,3 +142,78 @@ func TestIsNodeUnmanagedByProviderID(t *testing.T) { assert.Equal(t, test.expected, isUnmanagedNode, test.providerID) } } + +func TestIsBackendPoolOnSameLB(t *testing.T) { + tests := []struct { + backendPoolID string + existingBackendPools []string + expected bool + expectedLBName string + expectError bool + }{ + { + backendPoolID: "/subscriptions/sub/resourceGroups/rg/providers/Microsoft.Network/loadBalancers/lb1/backendAddressPools/pool1", + existingBackendPools: []string{ + "/subscriptions/sub/resourceGroups/rg/providers/Microsoft.Network/loadBalancers/lb1/backendAddressPools/pool2", + }, + expected: true, + expectedLBName: "", + }, + { + backendPoolID: "/subscriptions/sub/resourceGroups/rg/providers/Microsoft.Network/loadBalancers/lb1-internal/backendAddressPools/pool1", + existingBackendPools: []string{ + 
"/subscriptions/sub/resourceGroups/rg/providers/Microsoft.Network/loadBalancers/lb1/backendAddressPools/pool2", + }, + expected: true, + expectedLBName: "", + }, + { + backendPoolID: "/subscriptions/sub/resourceGroups/rg/providers/Microsoft.Network/loadBalancers/lb1/backendAddressPools/pool1", + existingBackendPools: []string{ + "/subscriptions/sub/resourceGroups/rg/providers/Microsoft.Network/loadBalancers/lb1-internal/backendAddressPools/pool2", + }, + expected: true, + expectedLBName: "", + }, + { + backendPoolID: "/subscriptions/sub/resourceGroups/rg/providers/Microsoft.Network/loadBalancers/lb1/backendAddressPools/pool1", + existingBackendPools: []string{ + "/subscriptions/sub/resourceGroups/rg/providers/Microsoft.Network/loadBalancers/lb2/backendAddressPools/pool2", + }, + expected: false, + expectedLBName: "lb2", + }, + { + backendPoolID: "wrong-backendpool-id", + existingBackendPools: []string{ + "/subscriptions/sub/resourceGroups/rg/providers/Microsoft.Network/loadBalancers/lb1/backendAddressPools/pool2", + }, + expectError: true, + }, + { + backendPoolID: "/subscriptions/sub/resourceGroups/rg/providers/Microsoft.Network/loadBalancers/lb1/backendAddressPools/pool1", + existingBackendPools: []string{ + "wrong-existing-backendpool-id", + }, + expectError: true, + }, + { + backendPoolID: "wrong-backendpool-id", + existingBackendPools: []string{ + "wrong-existing-backendpool-id", + }, + expectError: true, + }, + } + + for _, test := range tests { + isSameLB, lbName, err := isBackendPoolOnSameLB(test.backendPoolID, test.existingBackendPools) + if test.expectError { + assert.Error(t, err) + continue + } + + assert.Equal(t, test.expected, isSameLB) + assert.Equal(t, test.expectedLBName, lbName) + } +} From 0a4e882ed76e184fc9333ffdf068d11d2a3ca1f2 Mon Sep 17 00:00:00 2001 From: John McMeeking Date: Thu, 18 Apr 2019 15:10:38 -0500 Subject: [PATCH 66/96] Set CPU metrics for init containers under containerd Copies PR #76503 for release-1.12. 
metrics-server doesn't return metrics for pods with init containers under containerd because they have incomplete CPU metrics returned by the kubelet /stats/summary API. This problem has been fixed in 1.14 (#74336), but the cherry-picks dropped the usageNanoCores metric. This change adds the missing usageNanoCores metric for init containers in Kubernetes v1.12. Fixes #76292 --- pkg/kubelet/stats/cri_stats_provider.go | 1 + 1 file changed, 1 insertion(+) diff --git a/pkg/kubelet/stats/cri_stats_provider.go b/pkg/kubelet/stats/cri_stats_provider.go index e67192cb5b12e..0a53f6ab3f5df 100644 --- a/pkg/kubelet/stats/cri_stats_provider.go +++ b/pkg/kubelet/stats/cri_stats_provider.go @@ -357,6 +357,7 @@ func (p *criStatsProvider) makeContainerStats( } else { result.CPU.Time = metav1.NewTime(time.Unix(0, time.Now().UnixNano())) result.CPU.UsageCoreNanoSeconds = Uint64Ptr(0) + result.CPU.UsageNanoCores = Uint64Ptr(0) } if stats.Memory != nil { result.Memory.Time = metav1.NewTime(time.Unix(0, stats.Memory.Timestamp)) From 8e7ff0858a8243ded9337d4fed3735758981dfec Mon Sep 17 00:00:00 2001 From: Marek Siarkowicz Date: Fri, 19 Apr 2019 11:43:14 +0200 Subject: [PATCH 67/96] Restore metrics-server using of IP addresses This preference list matches is used to pick prefered field from k8s node object. It was introduced in metrics-server 0.3 and changed default behaviour to use DNS instead of IP addresses. It was merged into k8s 1.12 and caused breaking change by introducing dependency on DNS configuration. 
--- cluster/addons/metrics-server/metrics-server-deployment.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/cluster/addons/metrics-server/metrics-server-deployment.yaml b/cluster/addons/metrics-server/metrics-server-deployment.yaml index bd412047b6411..75cf436c98d77 100644 --- a/cluster/addons/metrics-server/metrics-server-deployment.yaml +++ b/cluster/addons/metrics-server/metrics-server-deployment.yaml @@ -57,6 +57,7 @@ spec: # Remove these lines for non-GKE clusters, and when GKE supports token-based auth. - --kubelet-port=10255 - --deprecated-kubelet-completely-insecure=true + - --kubelet-preferred-address-types=InternalIP,Hostname,InternalDNS,ExternalDNS,ExternalIP ports: - containerPort: 443 name: https From f605caa4c2832de92925270efa093f283078d246 Mon Sep 17 00:00:00 2001 From: Pengfei Ni Date: Mon, 22 Apr 2019 12:34:02 +0800 Subject: [PATCH 68/96] Revert "Merge pull request #76529 from spencerhance/automated-cherry-pick-of-#72534-#74394-upstream-release-1.12" This reverts commit 535e3ad8319013eb0ade43f6a7b4f8d5c74874a7, reversing changes made to 336d7877e7ebc6db449d2aaa55458f2f0678fc3e. 
--- pkg/proxy/iptables/proxier.go | 69 +++-- test/e2e/framework/networking_utils.go | 333 +++++++++---------------- test/e2e/framework/service_util.go | 120 ++------- test/e2e/network/firewall.go | 11 - test/e2e/network/service.go | 60 +---- 5 files changed, 181 insertions(+), 412 deletions(-) diff --git a/pkg/proxy/iptables/proxier.go b/pkg/proxy/iptables/proxier.go index 0920e8ace18b7..41f03d49cde00 100644 --- a/pkg/proxy/iptables/proxier.go +++ b/pkg/proxy/iptables/proxier.go @@ -356,36 +356,39 @@ func NewProxier(ipt utiliptables.Interface, } type iptablesJumpChain struct { - table utiliptables.Table - dstChain utiliptables.Chain - srcChain utiliptables.Chain - comment string - extraArgs []string + table utiliptables.Table + chain utiliptables.Chain + sourceChain utiliptables.Chain + comment string + extraArgs []string } var iptablesJumpChains = []iptablesJumpChain{ {utiliptables.TableFilter, kubeExternalServicesChain, utiliptables.ChainInput, "kubernetes externally-visible service portals", []string{"-m", "conntrack", "--ctstate", "NEW"}}, - {utiliptables.TableFilter, kubeServicesChain, utiliptables.ChainForward, "kubernetes service portals", []string{"-m", "conntrack", "--ctstate", "NEW"}}, {utiliptables.TableFilter, kubeServicesChain, utiliptables.ChainOutput, "kubernetes service portals", []string{"-m", "conntrack", "--ctstate", "NEW"}}, - {utiliptables.TableFilter, kubeServicesChain, utiliptables.ChainInput, "kubernetes service portals", []string{"-m", "conntrack", "--ctstate", "NEW"}}, - {utiliptables.TableFilter, kubeForwardChain, utiliptables.ChainForward, "kubernetes forwarding rules", nil}, {utiliptables.TableNAT, kubeServicesChain, utiliptables.ChainOutput, "kubernetes service portals", nil}, {utiliptables.TableNAT, kubeServicesChain, utiliptables.ChainPrerouting, "kubernetes service portals", nil}, {utiliptables.TableNAT, kubePostroutingChain, utiliptables.ChainPostrouting, "kubernetes postrouting rules", nil}, + {utiliptables.TableFilter, 
kubeForwardChain, utiliptables.ChainForward, "kubernetes forwarding rules", nil}, } -var iptablesCleanupOnlyChains = []iptablesJumpChain{} +var iptablesCleanupOnlyChains = []iptablesJumpChain{ + // Present in kube 1.6 - 1.9. Removed by #56164 in favor of kubeExternalServicesChain + {utiliptables.TableFilter, kubeServicesChain, utiliptables.ChainInput, "kubernetes service portals", nil}, + // Present in kube <= 1.9. Removed by #60306 in favor of rule with extraArgs + {utiliptables.TableFilter, kubeServicesChain, utiliptables.ChainOutput, "kubernetes service portals", nil}, +} // CleanupLeftovers removes all iptables rules and chains created by the Proxier // It returns true if an error was encountered. Errors are logged. func CleanupLeftovers(ipt utiliptables.Interface) (encounteredError bool) { // Unlink our chains - for _, jump := range append(iptablesJumpChains, iptablesCleanupOnlyChains...) { - args := append(jump.extraArgs, - "-m", "comment", "--comment", jump.comment, - "-j", string(jump.dstChain), + for _, chain := range append(iptablesJumpChains, iptablesCleanupOnlyChains...) { + args := append(chain.extraArgs, + "-m", "comment", "--comment", chain.comment, + "-j", string(chain.chain), ) - if err := ipt.DeleteRule(jump.table, jump.srcChain, args...); err != nil { + if err := ipt.DeleteRule(chain.table, chain.sourceChain, args...); err != nil { if !utiliptables.IsNotFoundError(err) { glog.Errorf("Error removing pure-iptables proxy rule: %v", err) encounteredError = true @@ -659,17 +662,17 @@ func (proxier *Proxier) syncProxyRules() { glog.V(3).Infof("Syncing iptables rules") // Create and link the kube chains. 
- for _, jump := range iptablesJumpChains { - if _, err := proxier.iptables.EnsureChain(jump.table, jump.dstChain); err != nil { - glog.Errorf("Failed to ensure that %s chain %s exists: %v", jump.table, jump.dstChain, err) + for _, chain := range iptablesJumpChains { + if _, err := proxier.iptables.EnsureChain(chain.table, chain.chain); err != nil { + glog.Errorf("Failed to ensure that %s chain %s exists: %v", chain.table, kubeServicesChain, err) return } - args := append(jump.extraArgs, - "-m", "comment", "--comment", jump.comment, - "-j", string(jump.dstChain), + args := append(chain.extraArgs, + "-m", "comment", "--comment", chain.comment, + "-j", string(chain.chain), ) - if _, err := proxier.iptables.EnsureRule(utiliptables.Prepend, jump.table, jump.srcChain, args...); err != nil { - glog.Errorf("Failed to ensure that %s chain %s jumps to %s: %v", jump.table, jump.srcChain, jump.dstChain, err) + if _, err := proxier.iptables.EnsureRule(utiliptables.Prepend, chain.table, chain.sourceChain, args...); err != nil { + glog.Errorf("Failed to ensure that %s chain %s jumps to %s: %v", chain.table, chain.sourceChain, chain.chain, err) return } } @@ -827,7 +830,6 @@ func (proxier *Proxier) syncProxyRules() { } writeLine(proxier.natRules, append(args, "-j", string(svcChain))...) } else { - // No endpoints. writeLine(proxier.filterRules, "-A", string(kubeServicesChain), "-m", "comment", "--comment", fmt.Sprintf(`"%s has no endpoints"`, svcNameString), @@ -898,7 +900,6 @@ func (proxier *Proxier) syncProxyRules() { // This covers cases like GCE load-balancers which get added to the local routing table. writeLine(proxier.natRules, append(dstLocalOnlyArgs, "-j", string(svcChain))...) } else { - // No endpoints. writeLine(proxier.filterRules, "-A", string(kubeExternalServicesChain), "-m", "comment", "--comment", fmt.Sprintf(`"%s has no endpoints"`, svcNameString), @@ -911,10 +912,10 @@ func (proxier *Proxier) syncProxyRules() { } // Capture load-balancer ingress. 
- fwChain := svcInfo.serviceFirewallChainName - for _, ingress := range svcInfo.LoadBalancerStatus.Ingress { - if ingress.IP != "" { - if hasEndpoints { + if hasEndpoints { + fwChain := svcInfo.serviceFirewallChainName + for _, ingress := range svcInfo.LoadBalancerStatus.Ingress { + if ingress.IP != "" { // create service firewall chain if chain, ok := existingNATChains[fwChain]; ok { writeBytesLine(proxier.natChains, chain) @@ -975,19 +976,10 @@ func (proxier *Proxier) syncProxyRules() { // If the packet was able to reach the end of firewall chain, then it did not get DNATed. // It means the packet cannot go thru the firewall, then mark it for DROP writeLine(proxier.natRules, append(args, "-j", string(KubeMarkDropChain))...) - } else { - // No endpoints. - writeLine(proxier.filterRules, - "-A", string(kubeServicesChain), - "-m", "comment", "--comment", fmt.Sprintf(`"%s has no endpoints"`, svcNameString), - "-m", protocol, "-p", protocol, - "-d", utilproxy.ToCIDR(net.ParseIP(ingress.IP)), - "--dport", strconv.Itoa(svcInfo.Port), - "-j", "REJECT", - ) } } } + // FIXME: do we need REJECT rules for load-balancer ingress if !hasEndpoints? // Capture nodeports. If we had more than 2 rules it might be // worthwhile to make a new per-service chain for nodeport rules, but @@ -1069,7 +1061,6 @@ func (proxier *Proxier) syncProxyRules() { writeLine(proxier.natRules, append(args, "-j", string(svcXlbChain))...) } } else { - // No endpoints. writeLine(proxier.filterRules, "-A", string(kubeExternalServicesChain), "-m", "comment", "--comment", fmt.Sprintf(`"%s has no endpoints"`, svcNameString), diff --git a/test/e2e/framework/networking_utils.go b/test/e2e/framework/networking_utils.go index 9acef9353584e..0ce339842a7e8 100644 --- a/test/e2e/framework/networking_utils.go +++ b/test/e2e/framework/networking_utils.go @@ -17,6 +17,7 @@ limitations under the License. 
package framework import ( + "bytes" "encoding/json" "fmt" "io/ioutil" @@ -707,127 +708,88 @@ func CheckReachabilityFromPod(expectToBeReachable bool, timeout time.Duration, n Expect(err).NotTo(HaveOccurred()) } -type HTTPPokeParams struct { - Timeout time.Duration - ExpectCode int // default = 200 - BodyContains string - RetriableCodes []int +// Does an HTTP GET, but does not reuse TCP connections +// This masks problems where the iptables rule has changed, but we don't see it +// This is intended for relatively quick requests (status checks), so we set a short (5 seconds) timeout +func httpGetNoConnectionPool(url string) (*http.Response, error) { + return httpGetNoConnectionPoolTimeout(url, 5*time.Second) +} + +func httpGetNoConnectionPoolTimeout(url string, timeout time.Duration) (*http.Response, error) { + tr := utilnet.SetTransportDefaults(&http.Transport{ + DisableKeepAlives: true, + }) + client := &http.Client{ + Transport: tr, + Timeout: timeout, + } + + return client.Get(url) } -type HTTPPokeResult struct { - Status HTTPPokeStatus - Code int // HTTP code: 0 if the connection was not made - Error error // if there was any error - Body []byte // if code != 0 +func TestReachableHTTP(ip string, port int, request string, expect string) (bool, error) { + return TestReachableHTTPWithContent(ip, port, request, expect, nil) } -type HTTPPokeStatus string +func TestReachableHTTPWithRetriableErrorCodes(ip string, port int, request string, expect string, retriableErrCodes []int) (bool, error) { + return TestReachableHTTPWithContentTimeoutWithRetriableErrorCodes(ip, port, request, expect, nil, retriableErrCodes, time.Second*5) +} -const ( - HTTPSuccess HTTPPokeStatus = "Success" - HTTPError HTTPPokeStatus = "UnknownError" - // Any time we add new errors, we should audit all callers of this. 
- HTTPTimeout HTTPPokeStatus = "TimedOut" - HTTPRefused HTTPPokeStatus = "ConnectionRefused" - HTTPRetryCode HTTPPokeStatus = "RetryCode" - HTTPWrongCode HTTPPokeStatus = "WrongCode" - HTTPBadResponse HTTPPokeStatus = "BadResponse" -) +func TestReachableHTTPWithContent(ip string, port int, request string, expect string, content *bytes.Buffer) (bool, error) { + return TestReachableHTTPWithContentTimeout(ip, port, request, expect, content, 5*time.Second) +} -// PokeHTTP tries to connect to a host on a port for a given URL path. Callers -// can specify additional success parameters, if desired. -// -// The result status will be characterized as precisely as possible, given the -// known users of this. -// -// The result code will be zero in case of any failure to connect, or non-zero -// if the HTTP transaction completed (even if the other test params make this a -// failure). -// -// The result error will be populated for any status other than Success. -// -// The result body will be populated if the HTTP transaction was completed, even -// if the other test params make this a failure). -func PokeHTTP(host string, port int, path string, params *HTTPPokeParams) HTTPPokeResult { - hostPort := net.JoinHostPort(host, strconv.Itoa(port)) - url := fmt.Sprintf("http://%s%s", hostPort, path) - - ret := HTTPPokeResult{} - - // Sanity check inputs, because it has happened. These are the only things - // that should hard fail the test - they are basically ASSERT()s. 
- if host == "" { - Failf("Got empty host for HTTP poke (%s)", url) - return ret - } - if port == 0 { - Failf("Got port==0 for HTTP poke (%s)", url) - return ret - } +func TestReachableHTTPWithContentTimeout(ip string, port int, request string, expect string, content *bytes.Buffer, timeout time.Duration) (bool, error) { + return TestReachableHTTPWithContentTimeoutWithRetriableErrorCodes(ip, port, request, expect, content, []int{}, timeout) +} + +func TestReachableHTTPWithContentTimeoutWithRetriableErrorCodes(ip string, port int, request string, expect string, content *bytes.Buffer, retriableErrCodes []int, timeout time.Duration) (bool, error) { - // Set default params. - if params == nil { - params = &HTTPPokeParams{} + ipPort := net.JoinHostPort(ip, strconv.Itoa(port)) + url := fmt.Sprintf("http://%s%s", ipPort, request) + if ip == "" { + Failf("Got empty IP for reachability check (%s)", url) + return false, nil } - if params.ExpectCode == 0 { - params.ExpectCode = http.StatusOK + if port == 0 { + Failf("Got port==0 for reachability check (%s)", url) + return false, nil } - Logf("Poking %q", url) + Logf("Testing HTTP reachability of %v", url) - resp, err := httpGetNoConnectionPoolTimeout(url, params.Timeout) + resp, err := httpGetNoConnectionPoolTimeout(url, timeout) if err != nil { - ret.Error = err - neterr, ok := err.(net.Error) - if ok && neterr.Timeout() { - ret.Status = HTTPTimeout - } else if strings.Contains(err.Error(), "connection refused") { - ret.Status = HTTPRefused - } else { - ret.Status = HTTPError - } - Logf("Poke(%q): %v", url, err) - return ret + Logf("Got error testing for reachability of %s: %v", url, err) + return false, nil } - - ret.Code = resp.StatusCode - defer resp.Body.Close() body, err := ioutil.ReadAll(resp.Body) if err != nil { - ret.Status = HTTPError - ret.Error = fmt.Errorf("error reading HTTP body: %v", err) - Logf("Poke(%q): %v", url, ret.Error) - return ret + Logf("Got error reading response from %s: %v", url, err) + return 
false, nil } - ret.Body = make([]byte, len(body)) - copy(ret.Body, body) - - if resp.StatusCode != params.ExpectCode { - for _, code := range params.RetriableCodes { + if resp.StatusCode != 200 { + for _, code := range retriableErrCodes { if resp.StatusCode == code { - ret.Error = fmt.Errorf("retriable status code: %d", resp.StatusCode) - ret.Status = HTTPRetryCode - Logf("Poke(%q): %v", url, ret.Error) - return ret + Logf("Got non-success status %q when trying to access %s, but the error code is retriable", resp.Status, url) + return false, nil } } - ret.Status = HTTPWrongCode - ret.Error = fmt.Errorf("bad status code: %d", resp.StatusCode) - Logf("Poke(%q): %v", url, ret.Error) - return ret + return false, fmt.Errorf("received non-success return status %q trying to access %s; got body: %s", + resp.Status, url, string(body)) } - - if params.BodyContains != "" && !strings.Contains(string(body), params.BodyContains) { - ret.Status = HTTPBadResponse - ret.Error = fmt.Errorf("response does not contain expected substring: %q", string(body)) - Logf("Poke(%q): %v", url, ret.Error) - return ret + if !strings.Contains(string(body), expect) { + return false, fmt.Errorf("received response body without expected substring %q: %s", expect, string(body)) + } + if content != nil { + content.Write(body) } + return true, nil +} - ret.Status = HTTPSuccess - Logf("Poke(%q): success", url) - return ret +func TestNotReachableHTTP(ip string, port int) (bool, error) { + return TestNotReachableHTTPTimeout(ip, port, 5*time.Second) } func TestNotReachableHTTPTimeout(ip string, port int, timeout time.Duration) (bool, error) { @@ -853,140 +815,90 @@ func TestNotReachableHTTPTimeout(ip string, port int, timeout time.Duration) (bo return false, nil } -// Does an HTTP GET, but does not reuse TCP connections -// This masks problems where the iptables rule has changed, but we don't see it -func httpGetNoConnectionPoolTimeout(url string, timeout time.Duration) (*http.Response, error) { - tr := 
utilnet.SetTransportDefaults(&http.Transport{ - DisableKeepAlives: true, - }) - client := &http.Client{ - Transport: tr, - Timeout: timeout, +func TestReachableUDP(ip string, port int, request string, expect string) (bool, error) { + ipPort := net.JoinHostPort(ip, strconv.Itoa(port)) + uri := fmt.Sprintf("udp://%s", ipPort) + if ip == "" { + Failf("Got empty IP for reachability check (%s)", uri) + return false, nil + } + if port == 0 { + Failf("Got port==0 for reachability check (%s)", uri) + return false, nil } - return client.Get(url) -} + Logf("Testing UDP reachability of %v", uri) -type UDPPokeParams struct { - Timeout time.Duration - Response string -} + con, err := net.Dial("udp", ipPort) + if err != nil { + return false, fmt.Errorf("Failed to dial %s: %v", ipPort, err) + } -type UDPPokeResult struct { - Status UDPPokeStatus - Error error // if there was any error - Response []byte // if code != 0 -} + _, err = con.Write([]byte(fmt.Sprintf("%s\n", request))) + if err != nil { + return false, fmt.Errorf("Failed to send request: %v", err) + } -type UDPPokeStatus string + var buf []byte = make([]byte, len(expect)+1) -const ( - UDPSuccess UDPPokeStatus = "Success" - UDPError UDPPokeStatus = "UnknownError" - // Any time we add new errors, we should audit all callers of this. - UDPTimeout UDPPokeStatus = "TimedOut" - UDPRefused UDPPokeStatus = "ConnectionRefused" - UDPBadResponse UDPPokeStatus = "BadResponse" -) + err = con.SetDeadline(time.Now().Add(3 * time.Second)) + if err != nil { + return false, fmt.Errorf("Failed to set deadline: %v", err) + } -// PokeUDP tries to connect to a host on a port and send the given request. Callers -// can specify additional success parameters, if desired. -// -// The result status will be characterized as precisely as possible, given the -// known users of this. -// -// The result error will be populated for any status other than Success. 
-// -// The result response will be populated if the UDP transaction was completed, even -// if the other test params make this a failure). -func PokeUDP(host string, port int, request string, params *UDPPokeParams) UDPPokeResult { - hostPort := net.JoinHostPort(host, strconv.Itoa(port)) - url := fmt.Sprintf("udp://%s", hostPort) - - ret := UDPPokeResult{} - - // Sanity check inputs, because it has happened. These are the only things - // that should hard fail the test - they are basically ASSERT()s. - if host == "" { - Failf("Got empty host for UDP poke (%s)", url) - return ret + _, err = con.Read(buf) + if err != nil { + return false, nil } - if port == 0 { - Failf("Got port==0 for UDP poke (%s)", url) - return ret + + if !strings.Contains(string(buf), expect) { + return false, fmt.Errorf("Failed to retrieve %q, got %q", expect, string(buf)) } - // Set default params. - if params == nil { - params = &UDPPokeParams{} + Logf("Successfully reached %v", uri) + return true, nil +} + +func TestNotReachableUDP(ip string, port int, request string) (bool, error) { + ipPort := net.JoinHostPort(ip, strconv.Itoa(port)) + uri := fmt.Sprintf("udp://%s", ipPort) + if ip == "" { + Failf("Got empty IP for reachability check (%s)", uri) + return false, nil + } + if port == 0 { + Failf("Got port==0 for reachability check (%s)", uri) + return false, nil } - Logf("Poking %v", url) + Logf("Testing UDP non-reachability of %v", uri) - con, err := net.Dial("udp", hostPort) + con, err := net.Dial("udp", ipPort) if err != nil { - ret.Status = UDPError - ret.Error = err - Logf("Poke(%q): %v", url, err) - return ret + Logf("Confirmed that %s is not reachable", uri) + return true, nil } _, err = con.Write([]byte(fmt.Sprintf("%s\n", request))) if err != nil { - ret.Error = err - neterr, ok := err.(net.Error) - if ok && neterr.Timeout() { - ret.Status = UDPTimeout - } else if strings.Contains(err.Error(), "connection refused") { - ret.Status = UDPRefused - } else { - ret.Status = UDPError - } - 
Logf("Poke(%q): %v", url, err) - return ret + Logf("Confirmed that %s is not reachable", uri) + return true, nil } - if params.Timeout != 0 { - err = con.SetDeadline(time.Now().Add(params.Timeout)) - if err != nil { - ret.Status = UDPError - ret.Error = err - Logf("Poke(%q): %v", url, err) - return ret - } - } + var buf []byte = make([]byte, 1) - bufsize := len(params.Response) + 1 - if bufsize == 0 { - bufsize = 4096 - } - var buf []byte = make([]byte, bufsize) - n, err := con.Read(buf) + err = con.SetDeadline(time.Now().Add(3 * time.Second)) if err != nil { - ret.Error = err - neterr, ok := err.(net.Error) - if ok && neterr.Timeout() { - ret.Status = UDPTimeout - } else if strings.Contains(err.Error(), "connection refused") { - ret.Status = UDPRefused - } else { - ret.Status = UDPError - } - Logf("Poke(%q): %v", url, err) - return ret + return false, fmt.Errorf("Failed to set deadline: %v", err) } - ret.Response = buf[0:n] - if params.Response != "" && string(ret.Response) != params.Response { - ret.Status = UDPBadResponse - ret.Error = fmt.Errorf("response does not match expected string: %q", string(ret.Response)) - Logf("Poke(%q): %v", url, ret.Error) - return ret + _, err = con.Read(buf) + if err != nil { + Logf("Confirmed that %s is not reachable", uri) + return true, nil } - ret.Status = UDPSuccess - Logf("Poke(%q): success", url) - return ret + return false, nil } func TestHitNodesFromOutside(externalIP string, httpPort int32, timeout time.Duration, expectedHosts sets.String) error { @@ -999,12 +911,13 @@ func TestHitNodesFromOutsideWithCount(externalIP string, httpPort int32, timeout hittedHosts := sets.NewString() count := 0 condition := func() (bool, error) { - result := PokeHTTP(externalIP, int(httpPort), "/hostname", &HTTPPokeParams{Timeout: 1 * time.Second}) - if result.Status != HTTPSuccess { + var respBody bytes.Buffer + reached, err := TestReachableHTTPWithContentTimeout(externalIP, int(httpPort), "/hostname", "", &respBody, + 1*time.Second) + if 
err != nil || !reached { return false, nil } - - hittedHost := strings.TrimSpace(string(result.Body)) + hittedHost := strings.TrimSpace(respBody.String()) if !expectedHosts.Has(hittedHost) { Logf("Error hitting unexpected host: %v, reset counter: %v", hittedHost, count) count = 0 diff --git a/test/e2e/framework/service_util.go b/test/e2e/framework/service_util.go index 7ff873b6e8084..01b6e90201dc1 100644 --- a/test/e2e/framework/service_util.go +++ b/test/e2e/framework/service_util.go @@ -598,7 +598,6 @@ func (j *ServiceTestJig) waitForConditionOrFail(namespace, name string, timeout // name as the jig and runs the "netexec" container. func (j *ServiceTestJig) newRCTemplate(namespace string) *v1.ReplicationController { var replicas int32 = 1 - var grace int64 = 3 // so we don't race with kube-proxy when scaling up/down rc := &v1.ReplicationController{ ObjectMeta: metav1.ObjectMeta{ @@ -630,7 +629,7 @@ func (j *ServiceTestJig) newRCTemplate(namespace string) *v1.ReplicationControll }, }, }, - TerminationGracePeriodSeconds: &grace, + TerminationGracePeriodSeconds: new(int64), }, }, }, @@ -713,28 +712,6 @@ func (j *ServiceTestJig) RunOrFail(namespace string, tweak func(rc *v1.Replicati return result } -func (j *ServiceTestJig) Scale(namespace string, replicas int) { - rc := j.Name - scale, err := j.Client.CoreV1().ReplicationControllers(namespace).GetScale(rc, metav1.GetOptions{}) - if err != nil { - Failf("Failed to get scale for RC %q: %v", rc, err) - } - - scale.Spec.Replicas = int32(replicas) - _, err = j.Client.CoreV1().ReplicationControllers(namespace).UpdateScale(rc, scale) - if err != nil { - Failf("Failed to scale RC %q: %v", rc, err) - } - pods, err := j.waitForPodsCreated(namespace, replicas) - if err != nil { - Failf("Failed waiting for pods: %v", err) - } - if err := j.waitForPodsReady(namespace, pods); err != nil { - Failf("Failed waiting for pods to be running: %v", err) - } - return -} - func (j *ServiceTestJig) waitForPdbReady(namespace string) error { 
timeout := 2 * time.Minute for start := time.Now(); time.Since(start) < timeout; time.Sleep(2 * time.Second) { @@ -873,19 +850,9 @@ func (j *ServiceTestJig) TestReachableHTTP(host string, port int, timeout time.D } func (j *ServiceTestJig) TestReachableHTTPWithRetriableErrorCodes(host string, port int, retriableErrCodes []int, timeout time.Duration) { - pollfn := func() (bool, error) { - result := PokeHTTP(host, port, "/echo?msg=hello", - &HTTPPokeParams{ - BodyContains: "hello", - RetriableCodes: retriableErrCodes, - }) - if result.Status == HTTPSuccess { - return true, nil - } - return false, nil // caller can retry - } - - if err := wait.PollImmediate(Poll, timeout, pollfn); err != nil { + if err := wait.PollImmediate(Poll, timeout, func() (bool, error) { + return TestReachableHTTPWithRetriableErrorCodes(host, port, "/echo?msg=hello", "hello", retriableErrCodes) + }); err != nil { if err == wait.ErrWaitTimeout { Failf("Could not reach HTTP service through %v:%v after %v", host, port, timeout) } else { @@ -895,87 +862,36 @@ func (j *ServiceTestJig) TestReachableHTTPWithRetriableErrorCodes(host string, p } func (j *ServiceTestJig) TestNotReachableHTTP(host string, port int, timeout time.Duration) { - pollfn := func() (bool, error) { - result := PokeHTTP(host, port, "/", nil) - if result.Code == 0 { - return true, nil - } - return false, nil // caller can retry - } - - if err := wait.PollImmediate(Poll, timeout, pollfn); err != nil { - Failf("HTTP service %v:%v reachable after %v: %v", host, port, timeout, err) - } -} - -func (j *ServiceTestJig) TestRejectedHTTP(host string, port int, timeout time.Duration) { - pollfn := func() (bool, error) { - result := PokeHTTP(host, port, "/", nil) - if result.Status == HTTPRefused { - return true, nil - } - return false, nil // caller can retry - } - - if err := wait.PollImmediate(Poll, timeout, pollfn); err != nil { - Failf("HTTP service %v:%v not rejected: %v", host, port, err) + if err := wait.PollImmediate(Poll, timeout, 
func() (bool, error) { return TestNotReachableHTTP(host, port) }); err != nil { + Failf("Could still reach HTTP service through %v:%v after %v: %v", host, port, timeout, err) } } func (j *ServiceTestJig) TestReachableUDP(host string, port int, timeout time.Duration) { - pollfn := func() (bool, error) { - result := PokeUDP(host, port, "echo hello", &UDPPokeParams{ - Timeout: 3 * time.Second, - Response: "hello", - }) - if result.Status == UDPSuccess { - return true, nil - } - return false, nil // caller can retry - } - - if err := wait.PollImmediate(Poll, timeout, pollfn); err != nil { + if err := wait.PollImmediate(Poll, timeout, func() (bool, error) { return TestReachableUDP(host, port, "echo hello", "hello") }); err != nil { Failf("Could not reach UDP service through %v:%v after %v: %v", host, port, timeout, err) } } func (j *ServiceTestJig) TestNotReachableUDP(host string, port int, timeout time.Duration) { - pollfn := func() (bool, error) { - result := PokeUDP(host, port, "echo hello", &UDPPokeParams{Timeout: 3 * time.Second}) - if result.Status != UDPSuccess && result.Status != UDPError { - return true, nil - } - return false, nil // caller can retry - } - if err := wait.PollImmediate(Poll, timeout, pollfn); err != nil { - Failf("UDP service %v:%v reachable after %v: %v", host, port, timeout, err) - } -} - -func (j *ServiceTestJig) TestRejectedUDP(host string, port int, timeout time.Duration) { - pollfn := func() (bool, error) { - result := PokeUDP(host, port, "echo hello", &UDPPokeParams{Timeout: 3 * time.Second}) - if result.Status == UDPRefused { - return true, nil - } - return false, nil // caller can retry - } - if err := wait.PollImmediate(Poll, timeout, pollfn); err != nil { - Failf("UDP service %v:%v not rejected: %v", host, port, err) + if err := wait.PollImmediate(Poll, timeout, func() (bool, error) { return TestNotReachableUDP(host, port, "echo hello") }); err != nil { + Failf("Could still reach UDP service through %v:%v after %v: %v", host, port, 
timeout, err) } } func (j *ServiceTestJig) GetHTTPContent(host string, port int, timeout time.Duration, url string) bytes.Buffer { var body bytes.Buffer + var err error if pollErr := wait.PollImmediate(Poll, timeout, func() (bool, error) { - result := PokeHTTP(host, port, url, nil) - if result.Status == HTTPSuccess { - body.Write(result.Body) - return true, nil + var result bool + result, err = TestReachableHTTPWithContent(host, port, url, "", &body) + if err != nil { + Logf("Error hitting %v:%v%v, retrying: %v", host, port, url, err) + return false, nil } - return false, nil + return result, nil }); pollErr != nil { - Failf("Could not reach HTTP service through %v:%v%v after %v: %v", host, port, url, timeout, pollErr) + Failf("Could not reach HTTP service through %v:%v%v after %v: %v", host, port, url, timeout, err) } return body } @@ -988,7 +904,7 @@ func testHTTPHealthCheckNodePort(ip string, port int, request string) (bool, err return false, fmt.Errorf("Invalid input ip or port") } Logf("Testing HTTP health check on %v", url) - resp, err := httpGetNoConnectionPoolTimeout(url, 5*time.Second) + resp, err := httpGetNoConnectionPool(url) if err != nil { Logf("Got error testing for reachability of %s: %v", url, err) return false, err diff --git a/test/e2e/network/firewall.go b/test/e2e/network/firewall.go index c4f24dd4895b5..f665ff3c6d128 100644 --- a/test/e2e/network/firewall.go +++ b/test/e2e/network/firewall.go @@ -18,7 +18,6 @@ package network import ( "fmt" - "time" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/util/sets" @@ -183,13 +182,3 @@ var _ = SIGDescribe("Firewall rule", func() { Expect(flag).To(BeTrue()) }) }) - -func assertNotReachableHTTPTimeout(ip string, port int, timeout time.Duration) { - result := framework.PokeHTTP(ip, port, "/", &framework.HTTPPokeParams{Timeout: timeout}) - if result.Status == framework.HTTPError { - framework.Failf("Unexpected error checking for reachability of %s:%d: %v", ip, port, result.Error) - } - if result.Code != 0 { 
- framework.Failf("Was unexpectedly able to reach %s:%d", ip, port) - } -} diff --git a/test/e2e/network/service.go b/test/e2e/network/service.go index cf62ddc4eee53..441a00bc59e6d 100644 --- a/test/e2e/network/service.go +++ b/test/e2e/network/service.go @@ -789,47 +789,11 @@ var _ = SIGDescribe("Services", func() { jig.TestReachableUDP(nodeIP, udpNodePort, framework.KubeProxyLagTimeout) By("hitting the TCP service's LoadBalancer") - jig.TestReachableHTTP(tcpIngressIP, svcPort, loadBalancerCreateTimeout) + jig.TestReachableHTTP(tcpIngressIP, svcPort, loadBalancerCreateTimeout) // this may actually recreate the LB if loadBalancerSupportsUDP { By("hitting the UDP service's LoadBalancer") - jig.TestReachableUDP(udpIngressIP, svcPort, loadBalancerCreateTimeout) - } - - By("Scaling the pods to 0") - jig.Scale(ns1, 0) - jig.Scale(ns2, 0) - - By("looking for ICMP REJECT on the TCP service's NodePort") - jig.TestRejectedHTTP(nodeIP, tcpNodePort, framework.KubeProxyLagTimeout) - - By("looking for ICMP REJECT on the UDP service's NodePort") - jig.TestRejectedUDP(nodeIP, udpNodePort, framework.KubeProxyLagTimeout) - - By("looking for ICMP REJECT on the TCP service's LoadBalancer") - jig.TestRejectedHTTP(tcpIngressIP, svcPort, loadBalancerCreateTimeout) - - if loadBalancerSupportsUDP { - By("looking for ICMP REJECT on the UDP service's LoadBalancer") - jig.TestRejectedUDP(udpIngressIP, svcPort, loadBalancerCreateTimeout) - } - - By("Scaling the pods to 1") - jig.Scale(ns1, 1) - jig.Scale(ns2, 1) - - By("hitting the TCP service's NodePort") - jig.TestReachableHTTP(nodeIP, tcpNodePort, framework.KubeProxyLagTimeout) - - By("hitting the UDP service's NodePort") - jig.TestReachableUDP(nodeIP, udpNodePort, framework.KubeProxyLagTimeout) - - By("hitting the TCP service's LoadBalancer") - jig.TestReachableHTTP(tcpIngressIP, svcPort, loadBalancerCreateTimeout) - - if loadBalancerSupportsUDP { - By("hitting the UDP service's LoadBalancer") - jig.TestReachableUDP(udpIngressIP, svcPort, 
loadBalancerCreateTimeout) + jig.TestReachableUDP(udpIngressIP, svcPort, loadBalancerCreateTimeout) // this may actually recreate the LB) } // Change the services back to ClusterIP. @@ -1967,18 +1931,14 @@ var _ = SIGDescribe("ESIPP [Slow] [DisabledForLargeClusters]", func() { for nodeName, nodeIPs := range endpointNodeMap { By(fmt.Sprintf("checking kube-proxy health check fails on node with endpoint (%s), public IP %s", nodeName, nodeIPs[0])) var body bytes.Buffer - pollfn := func() (bool, error) { - result := framework.PokeHTTP(nodeIPs[0], healthCheckNodePort, "/healthz", nil) - if result.Code == 0 { - return true, nil - } - body.Reset() - body.Write(result.Body) - return false, nil - } - if pollErr := wait.PollImmediate(framework.Poll, framework.ServiceTestTimeout, pollfn); pollErr != nil { - framework.Failf("Kube-proxy still exposing health check on node %v:%v, after ESIPP was turned off. body %s", - nodeName, healthCheckNodePort, body.String()) + var result bool + var err error + if pollErr := wait.PollImmediate(framework.Poll, framework.ServiceTestTimeout, func() (bool, error) { + result, err = framework.TestReachableHTTPWithContent(nodeIPs[0], healthCheckNodePort, "/healthz", "", &body) + return !result, nil + }); pollErr != nil { + framework.Failf("Kube-proxy still exposing health check on node %v:%v, after ESIPP was turned off. 
Last err %v, last body %v", + nodeName, healthCheckNodePort, err, body.String()) } } From a62b33f62b04af958da80a9ff7de4ded005a28eb Mon Sep 17 00:00:00 2001 From: Anago GCB Date: Tue, 23 Apr 2019 04:40:55 +0000 Subject: [PATCH 69/96] Kubernetes version v1.12.9-beta.0 openapi-spec file updates --- api/openapi-spec/swagger.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/api/openapi-spec/swagger.json b/api/openapi-spec/swagger.json index 4a867e381aab5..5d6b97ca76c55 100644 --- a/api/openapi-spec/swagger.json +++ b/api/openapi-spec/swagger.json @@ -2,7 +2,7 @@ "swagger": "2.0", "info": { "title": "Kubernetes", - "version": "v1.12.8" + "version": "v1.12.9" }, "paths": { "/api/": { From 655856a0d524b537518430d80798b16ce2e91fc3 Mon Sep 17 00:00:00 2001 From: Anago GCB Date: Tue, 23 Apr 2019 06:09:34 +0000 Subject: [PATCH 70/96] Add/Update CHANGELOG-1.12.md for v1.12.8. --- CHANGELOG-1.12.md | 172 +++++++++++++++++++++++++++++++++------------- 1 file changed, 126 insertions(+), 46 deletions(-) diff --git a/CHANGELOG-1.12.md b/CHANGELOG-1.12.md index ce04fceab3928..61128f607be45 100644 --- a/CHANGELOG-1.12.md +++ b/CHANGELOG-1.12.md @@ -1,59 +1,66 @@ -- [v1.12.7](#v1127) - - [Downloads for v1.12.7](#downloads-for-v1127) +- [v1.12.8](#v1128) + - [Downloads for v1.12.8](#downloads-for-v1128) - [Client Binaries](#client-binaries) - [Server Binaries](#server-binaries) - [Node Binaries](#node-binaries) - - [Changelog since v1.12.6](#changelog-since-v1126) + - [Changelog since v1.12.7](#changelog-since-v1127) - [Other notable changes](#other-notable-changes) -- [v1.12.6](#v1126) - - [Downloads for v1.12.6](#downloads-for-v1126) +- [v1.12.7](#v1127) + - [Downloads for v1.12.7](#downloads-for-v1127) - [Client Binaries](#client-binaries-1) - [Server Binaries](#server-binaries-1) - [Node Binaries](#node-binaries-1) - - [Changelog since v1.12.5](#changelog-since-v1125) + - [Changelog since v1.12.6](#changelog-since-v1126) - [Other notable 
changes](#other-notable-changes-1) -- [v1.12.5](#v1125) - - [Downloads for v1.12.5](#downloads-for-v1125) +- [v1.12.6](#v1126) + - [Downloads for v1.12.6](#downloads-for-v1126) - [Client Binaries](#client-binaries-2) - [Server Binaries](#server-binaries-2) - [Node Binaries](#node-binaries-2) - - [Changelog since v1.12.4](#changelog-since-v1124) + - [Changelog since v1.12.5](#changelog-since-v1125) - [Other notable changes](#other-notable-changes-2) -- [v1.12.4](#v1124) - - [Downloads for v1.12.4](#downloads-for-v1124) +- [v1.12.5](#v1125) + - [Downloads for v1.12.5](#downloads-for-v1125) - [Client Binaries](#client-binaries-3) - [Server Binaries](#server-binaries-3) - [Node Binaries](#node-binaries-3) - - [Changelog since v1.12.3](#changelog-since-v1123) - - [Action Required](#action-required) + - [Changelog since v1.12.4](#changelog-since-v1124) - [Other notable changes](#other-notable-changes-3) -- [v1.12.3](#v1123) - - [Downloads for v1.12.3](#downloads-for-v1123) +- [v1.12.4](#v1124) + - [Downloads for v1.12.4](#downloads-for-v1124) - [Client Binaries](#client-binaries-4) - [Server Binaries](#server-binaries-4) - [Node Binaries](#node-binaries-4) - - [Changelog since v1.12.2](#changelog-since-v1122) + - [Changelog since v1.12.3](#changelog-since-v1123) + - [Action Required](#action-required) - [Other notable changes](#other-notable-changes-4) -- [v1.12.2](#v1122) - - [Downloads for v1.12.2](#downloads-for-v1122) +- [v1.12.3](#v1123) + - [Downloads for v1.12.3](#downloads-for-v1123) - [Client Binaries](#client-binaries-5) - [Server Binaries](#server-binaries-5) - [Node Binaries](#node-binaries-5) - - [Changelog since v1.12.1](#changelog-since-v1121) + - [Changelog since v1.12.2](#changelog-since-v1122) - [Other notable changes](#other-notable-changes-5) -- [v1.12.1](#v1121) - - [Downloads for v1.12.1](#downloads-for-v1121) +- [v1.12.2](#v1122) + - [Downloads for v1.12.2](#downloads-for-v1122) - [Client Binaries](#client-binaries-6) - [Server 
Binaries](#server-binaries-6) - [Node Binaries](#node-binaries-6) - - [Changelog since v1.12.0](#changelog-since-v1120) + - [Changelog since v1.12.1](#changelog-since-v1121) - [Other notable changes](#other-notable-changes-6) -- [v1.12.0](#v1120) - - [Downloads for v1.12.0](#downloads-for-v1120) +- [v1.12.1](#v1121) + - [Downloads for v1.12.1](#downloads-for-v1121) - [Client Binaries](#client-binaries-7) - [Server Binaries](#server-binaries-7) - [Node Binaries](#node-binaries-7) + - [Changelog since v1.12.0](#changelog-since-v1120) + - [Other notable changes](#other-notable-changes-7) +- [v1.12.0](#v1120) + - [Downloads for v1.12.0](#downloads-for-v1120) + - [Client Binaries](#client-binaries-8) + - [Server Binaries](#server-binaries-8) + - [Node Binaries](#node-binaries-8) - [Known Issues](#known-issues) - [Major Themes](#major-themes) - [SIG API Machinery](#sig-api-machinery) @@ -75,7 +82,7 @@ - [Deprecations and removals](#deprecations-and-removals) - [New Features](#new-features) - [API Changes](#api-changes) - - [Other Notable Changes](#other-notable-changes-7) + - [Other Notable Changes](#other-notable-changes-8) - [SIG API Machinery](#sig-api-machinery-1) - [SIG Apps](#sig-apps) - [SIG Auth](#sig-auth) @@ -94,54 +101,127 @@ - [SIG Storage](#sig-storage-1) - [SIG VMWare](#sig-vmware-1) - [SIG Windows](#sig-windows-1) - - [Other Notable Changes](#other-notable-changes-8) + - [Other Notable Changes](#other-notable-changes-9) - [Bug Fixes](#bug-fixes) - [Not Very Notable (that is, non-user-facing)](#not-very-notable-that-is-non-user-facing) - [External Dependencies](#external-dependencies) - [v1.12.0-rc.2](#v1120-rc2) - [Downloads for v1.12.0-rc.2](#downloads-for-v1120-rc2) - - [Client Binaries](#client-binaries-8) - - [Server Binaries](#server-binaries-8) - - [Node Binaries](#node-binaries-8) - - [Changelog since v1.12.0-rc.1](#changelog-since-v1120-rc1) - - [Other notable changes](#other-notable-changes-9) -- [v1.12.0-rc.1](#v1120-rc1) - - [Downloads for 
v1.12.0-rc.1](#downloads-for-v1120-rc1) - [Client Binaries](#client-binaries-9) - [Server Binaries](#server-binaries-9) - [Node Binaries](#node-binaries-9) - - [Changelog since v1.12.0-beta.2](#changelog-since-v1120-beta2) - - [Action Required](#action-required-2) + - [Changelog since v1.12.0-rc.1](#changelog-since-v1120-rc1) - [Other notable changes](#other-notable-changes-10) -- [v1.12.0-beta.2](#v1120-beta2) - - [Downloads for v1.12.0-beta.2](#downloads-for-v1120-beta2) +- [v1.12.0-rc.1](#v1120-rc1) + - [Downloads for v1.12.0-rc.1](#downloads-for-v1120-rc1) - [Client Binaries](#client-binaries-10) - [Server Binaries](#server-binaries-10) - [Node Binaries](#node-binaries-10) - - [Changelog since v1.12.0-beta.1](#changelog-since-v1120-beta1) - - [Action Required](#action-required-3) + - [Changelog since v1.12.0-beta.2](#changelog-since-v1120-beta2) + - [Action Required](#action-required-2) - [Other notable changes](#other-notable-changes-11) -- [v1.12.0-beta.1](#v1120-beta1) - - [Downloads for v1.12.0-beta.1](#downloads-for-v1120-beta1) +- [v1.12.0-beta.2](#v1120-beta2) + - [Downloads for v1.12.0-beta.2](#downloads-for-v1120-beta2) - [Client Binaries](#client-binaries-11) - [Server Binaries](#server-binaries-11) - [Node Binaries](#node-binaries-11) - - [Changelog since v1.12.0-alpha.1](#changelog-since-v1120-alpha1) - - [Action Required](#action-required-4) + - [Changelog since v1.12.0-beta.1](#changelog-since-v1120-beta1) + - [Action Required](#action-required-3) - [Other notable changes](#other-notable-changes-12) -- [v1.12.0-alpha.1](#v1120-alpha1) - - [Downloads for v1.12.0-alpha.1](#downloads-for-v1120-alpha1) +- [v1.12.0-beta.1](#v1120-beta1) + - [Downloads for v1.12.0-beta.1](#downloads-for-v1120-beta1) - [Client Binaries](#client-binaries-12) - [Server Binaries](#server-binaries-12) - [Node Binaries](#node-binaries-12) + - [Changelog since v1.12.0-alpha.1](#changelog-since-v1120-alpha1) + - [Action Required](#action-required-4) + - [Other notable 
changes](#other-notable-changes-13) +- [v1.12.0-alpha.1](#v1120-alpha1) + - [Downloads for v1.12.0-alpha.1](#downloads-for-v1120-alpha1) + - [Client Binaries](#client-binaries-13) + - [Server Binaries](#server-binaries-13) + - [Node Binaries](#node-binaries-13) - [Changelog since v1.11.0](#changelog-since-v1110) - [Action Required](#action-required-5) - - [Other notable changes](#other-notable-changes-13) + - [Other notable changes](#other-notable-changes-14) +# v1.12.8 + +[Documentation](https://docs.k8s.io) + +## Downloads for v1.12.8 + + +filename | sha512 hash +-------- | ----------- +[kubernetes.tar.gz](https://dl.k8s.io/v1.12.8/kubernetes.tar.gz) | `0f14f54bcd3ef8260e424ccb9be4d1d7ad8d03e15d00d081fdf564adc319ca4040d404f37466a2342650d08d5c41b1d411f172ff78e611b05fca8fd5404590d9` +[kubernetes-src.tar.gz](https://dl.k8s.io/v1.12.8/kubernetes-src.tar.gz) | `10b6ce78a906effbb38600d8e496c49e9739fffaba8d44eff54d298b0f899481b9e4cc60eb918586f3d1055f4db44880fd2b42ad40a391aadfd8a53c584c8c1c` + +### Client Binaries + +filename | sha512 hash +-------- | ----------- +[kubernetes-client-darwin-386.tar.gz](https://dl.k8s.io/v1.12.8/kubernetes-client-darwin-386.tar.gz) | `bfb3680c47f674773c50c446577eb3f10468a6fd367a2ee7f851d299f4ff04071757962ddff10659b185ab80e4fc474f10354273560803101b66c9c939279e08` +[kubernetes-client-darwin-amd64.tar.gz](https://dl.k8s.io/v1.12.8/kubernetes-client-darwin-amd64.tar.gz) | `b6543d97975add3a27f75ff6fcc7c3caeb8749ac88967cb79a6688ba4ba1837fda3582a0f5588073a855a2da43c9b353b565974b7a29f619709f862d8ce1e0b3` +[kubernetes-client-linux-386.tar.gz](https://dl.k8s.io/v1.12.8/kubernetes-client-linux-386.tar.gz) | `57358d71b4c19d826e4979b1ef3f33b5b1e05c50ba257d6bbfa8d76f15849ebcba389c55f1be50fdc77a311935a0e7ecc827a3f35ee5896a6ceda7580d8b4680` +[kubernetes-client-linux-amd64.tar.gz](https://dl.k8s.io/v1.12.8/kubernetes-client-linux-amd64.tar.gz) | 
`55e6c2ec67aa3283e3b6904418b35845fa14f5faaed0cf503a7adb4e52842f7c3aaa5fbbfdbcf508794c784d93bec48e27e598879e89302c48f54eebdef69d3d` +[kubernetes-client-linux-arm.tar.gz](https://dl.k8s.io/v1.12.8/kubernetes-client-linux-arm.tar.gz) | `0a7b54f8846ddf9d6ef6df863a0211ab448dfbdeeaf78ec163b4e46fa4d7f92611f71ac757bb00d6dfee6314c78ac12cf50020d8d6c9b1dbac550425ccb53743` +[kubernetes-client-linux-arm64.tar.gz](https://dl.k8s.io/v1.12.8/kubernetes-client-linux-arm64.tar.gz) | `ad68df3f56c2622a01f54a8575c7cec3b9f508c1332bd16cf3f39b9e3f66dae3b495fc1dce3d69504f18b0feb281268fed306538db538d01e74210be45bafb97` +[kubernetes-client-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.12.8/kubernetes-client-linux-ppc64le.tar.gz) | `1452011ed3f37984ff9493df0d490eefb8a5c0d84c2f87d9ff47ffe9924a14d918c5dfa755494c05975a10b191d75173d0d30be3449e36cffff4b0495f22efa8` +[kubernetes-client-linux-s390x.tar.gz](https://dl.k8s.io/v1.12.8/kubernetes-client-linux-s390x.tar.gz) | `edca658d8f91dd4939c6eba444b2b56a30304d3d0c42607e823acf64dace852cc66a8f14d4bf2fc2bdf0c99bbbe4a9625c86f38535d5b24e8c3b95e76193e530` +[kubernetes-client-windows-386.tar.gz](https://dl.k8s.io/v1.12.8/kubernetes-client-windows-386.tar.gz) | `14e5daaf4623d11380b552fc3fa5ad6bf98488dcf365c8cfa8d7f1d26fe73b317e5cfeb3e46f4e9d582e2a04cd70bc2ed3dfb915c88aacd997324ca8c2582d52` +[kubernetes-client-windows-amd64.tar.gz](https://dl.k8s.io/v1.12.8/kubernetes-client-windows-amd64.tar.gz) | `1a1d4457620daf2f54e11b2ef790f30890bca71502f86a3c0163a4e6a5afb701c3d60511b944eb4b80c9418e7ee6864d44aba26deacd44a717a5c8c4850794af` + +### Server Binaries + +filename | sha512 hash +-------- | ----------- +[kubernetes-server-linux-amd64.tar.gz](https://dl.k8s.io/v1.12.8/kubernetes-server-linux-amd64.tar.gz) | `8d1a70cfa9012282f679d876ae070f7830aad11ef64f437b90320ccae5253a3f527df0abb56f34004ccb2113e195638b6ca69aad9aff85f9dcb588aacca81d55` +[kubernetes-server-linux-arm.tar.gz](https://dl.k8s.io/v1.12.8/kubernetes-server-linux-arm.tar.gz) | 
`67022706b4bf98aba305fd3759940ce396e35474814ced4152152b4cc536d79e1b4e3a4027e45af3637ea006fbabadca34d8ecc6874138300230b2b0bcd2dcb5` +[kubernetes-server-linux-arm64.tar.gz](https://dl.k8s.io/v1.12.8/kubernetes-server-linux-arm64.tar.gz) | `00d7e79fa71f4265b8ba5cc2e62c2ab4b5d1076bddc8155a3b7a5e589c34446860c25571b972566e694709230d32de763ac3ba0a97a2cc2cbc6c7b431b30a1e0` +[kubernetes-server-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.12.8/kubernetes-server-linux-ppc64le.tar.gz) | `8add81c5f767dbdd04ac39f07aa4855be86c91f848c2e331d40734e85d0d6c7ea5cd0c575ab49b101c1e6ba5224eb1762f8f73f39610c773b76f7d1ebffc86cc` +[kubernetes-server-linux-s390x.tar.gz](https://dl.k8s.io/v1.12.8/kubernetes-server-linux-s390x.tar.gz) | `63e58ea49072ac058e74b989f9a74887b27c52d56923f44a7d53cb384915f4a2425e65d6e9f6642d4fbac102bb9a45baad901a32a5414989c0b2d2cc57ffa59f` + +### Node Binaries + +filename | sha512 hash +-------- | ----------- +[kubernetes-node-linux-amd64.tar.gz](https://dl.k8s.io/v1.12.8/kubernetes-node-linux-amd64.tar.gz) | `6677af5330149f39c6d84722e5418bf35caf4d431fb97fd0df102373a5faaf4a8344921bc2a51290abe521620f6543c482a54720692d245ff36142019fcc0c19` +[kubernetes-node-linux-arm.tar.gz](https://dl.k8s.io/v1.12.8/kubernetes-node-linux-arm.tar.gz) | `aa0f2abaec8ac765acffe1c6ff00c01cd74befba98a5c7afb30f716bd37f9094e1c314df7f3b7c8361c86e6c78f9aa246623e5f1934d60148663851810aa4815` +[kubernetes-node-linux-arm64.tar.gz](https://dl.k8s.io/v1.12.8/kubernetes-node-linux-arm64.tar.gz) | `c4bb230afcf78414461b32cedd0564a58e02e82b0a679ea42f977e3bc501cc4857694774dad423b4a76542a4698929c8f5429b9737f1e324d21e39afbc5be48f` +[kubernetes-node-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.12.8/kubernetes-node-linux-ppc64le.tar.gz) | `31d4bdc1528ee8d4ab4ee16e3da08c1e8c026eaafd88950836f19e10bf3e87d12876a25e2c90a81529a9d26f8465cf50e8997e8c14fb555f21d69323a885f2eb` +[kubernetes-node-linux-s390x.tar.gz](https://dl.k8s.io/v1.12.8/kubernetes-node-linux-s390x.tar.gz) | 
`d7b8a81b14a12578ca6273dc32703f906627e244ed00639436fb3cb38d4b4aa55d7a857f9a844844bc2d463619b890329043b78c9ec8ff0f5b38dc55b572cd71` +[kubernetes-node-windows-amd64.tar.gz](https://dl.k8s.io/v1.12.8/kubernetes-node-windows-amd64.tar.gz) | `9ca9ef41a42d8cb5c15533a10848170fa0f7c1e4eccbc8d6269ce085ab7670e446473e4e240b3bb1905dda3078725976b0506a27d67e6e3a2fd546aaa6678d84` + +## Changelog since v1.12.7 + +### Other notable changes + +* Connections from Pods to Services with 0 endpoints will now ICMP reject immediately, rather than blackhole and timeout. ([#72534](https://github.com/kubernetes/kubernetes/pull/72534), [@thockin](https://github.com/thockin)) +* Services of type=LoadBalancer which have no endpoints will now immediately ICMP reject connections, rather than time out. ([#74394](https://github.com/kubernetes/kubernetes/pull/74394), [@thockin](https://github.com/thockin)) +* Ensure the backend pools are set correctly for Azure SLB with multiple backend pools (e.g. outbound rules) ([#76691](https://github.com/kubernetes/kubernetes/pull/76691), [@feiskyer](https://github.com/feiskyer)) +* Connections from Pods to Services with 0 endpoints will now ICMP reject immediately, rather than blackhole and timeout. ([#72534](https://github.com/kubernetes/kubernetes/pull/72534), [@thockin](https://github.com/thockin)) +* Services of type=LoadBalancer which have no endpoints will now immediately ICMP reject connections, rather than time out. ([#74394](https://github.com/kubernetes/kubernetes/pull/74394), [@thockin](https://github.com/thockin)) +* fix race condition issue for smb mount on windows ([#75371](https://github.com/kubernetes/kubernetes/pull/75371), [@andyzhangx](https://github.com/andyzhangx)) +* fix smb unmount issue on Windows ([#75087](https://github.com/kubernetes/kubernetes/pull/75087), [@andyzhangx](https://github.com/andyzhangx)) +* Increase Azure default maximumLoadBalancerRuleCount to 250. 
([#72621](https://github.com/kubernetes/kubernetes/pull/72621), [@feiskyer](https://github.com/feiskyer)) +* Fixes bug in DaemonSetController causing it to stop processing some DaemonSets for 5 minutes after node removal. ([#76060](https://github.com/kubernetes/kubernetes/pull/76060), [@krzysztof-jastrzebski](https://github.com/krzysztof-jastrzebski)) +* Fixes a NPD bug on GCI, so that it disables glog writing to files for log-counter ([#76211](https://github.com/kubernetes/kubernetes/pull/76211), [@wangzhen127](https://github.com/wangzhen127)) +* [stackdriver addon] Bump prometheus-to-sd to v0.5.0 to pick up security fixes. ([#75362](https://github.com/kubernetes/kubernetes/pull/75362), [@serathius](https://github.com/serathius)) + * [fluentd-gcp addon] Bump fluentd-gcp-scaler to v0.5.1 to pick up security fixes. + * [fluentd-gcp addon] Bump event-exporter to v0.2.4 to pick up security fixes. + * [fluentd-gcp addon] Bump prometheus-to-sd to v0.5.0 to pick up security fixes. + * [metatada-proxy addon] Bump prometheus-to-sd v0.5.0 to pick up security fixes. +* Fixed parsing of fsType in AWS StorageClass parameters ([#75944](https://github.com/kubernetes/kubernetes/pull/75944), [@jsafrane](https://github.com/jsafrane)) +* Node-Problem-Detector configuration is now decoupled from the Kubernetes release on GKE/GCE. 
([#73288](https://github.com/kubernetes/kubernetes/pull/73288), [@wangzhen127](https://github.com/wangzhen127)) +* [IPVS] Allow for transparent kube-proxy restarts ([#75283](https://github.com/kubernetes/kubernetes/pull/75283), [@lbernail](https://github.com/lbernail)) + + + # v1.12.7 [Documentation](https://docs.k8s.io) From a45702a785282299b27ef0d14b0a27e2bae4578a Mon Sep 17 00:00:00 2001 From: Pengfei Ni Date: Sun, 21 Apr 2019 18:12:29 +0800 Subject: [PATCH 71/96] Upgrade compute API to version 2019-03-01 --- pkg/cloudprovider/providers/azure/BUILD | 5 +++-- pkg/cloudprovider/providers/azure/azure_backoff.go | 2 +- pkg/cloudprovider/providers/azure/azure_client.go | 2 +- pkg/cloudprovider/providers/azure/azure_controller_common.go | 2 +- .../providers/azure/azure_controller_standard.go | 2 +- pkg/cloudprovider/providers/azure/azure_controller_vmss.go | 2 +- pkg/cloudprovider/providers/azure/azure_fakes.go | 2 +- pkg/cloudprovider/providers/azure/azure_instances_test.go | 2 +- .../providers/azure/azure_managedDiskController.go | 4 ++-- pkg/cloudprovider/providers/azure/azure_standard.go | 2 +- pkg/cloudprovider/providers/azure/azure_test.go | 2 +- pkg/cloudprovider/providers/azure/azure_vmsets.go | 4 +++- pkg/cloudprovider/providers/azure/azure_vmss.go | 2 +- pkg/cloudprovider/providers/azure/azure_vmss_test.go | 2 +- pkg/cloudprovider/providers/azure/azure_wrap.go | 2 +- pkg/volume/azure_dd/BUILD | 4 ++-- pkg/volume/azure_dd/attacher.go | 2 +- pkg/volume/azure_dd/azure_dd.go | 4 ++-- pkg/volume/azure_dd/azure_dd_test.go | 2 +- 19 files changed, 26 insertions(+), 23 deletions(-) diff --git a/pkg/cloudprovider/providers/azure/BUILD b/pkg/cloudprovider/providers/azure/BUILD index ca864212f973c..c183c84996cb6 100644 --- a/pkg/cloudprovider/providers/azure/BUILD +++ b/pkg/cloudprovider/providers/azure/BUILD @@ -59,7 +59,7 @@ go_library( "//staging/src/k8s.io/client-go/tools/cache:go_default_library", "//staging/src/k8s.io/client-go/tools/record:go_default_library", 
"//staging/src/k8s.io/client-go/util/flowcontrol:go_default_library", - "//vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-04-01/compute:go_default_library", + "//vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-03-01/compute:go_default_library", "//vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2017-09-01/network:go_default_library", "//vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2017-10-01/storage:go_default_library", "//vendor/github.com/Azure/azure-sdk-for-go/storage:go_default_library", @@ -79,6 +79,7 @@ go_test( srcs = [ "azure_backoff_test.go", "azure_cache_test.go", + "azure_controller_common_test.go", "azure_instances_test.go", "azure_loadbalancer_test.go", "azure_metrics_test.go", @@ -102,7 +103,7 @@ go_test( "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", - "//vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-04-01/compute:go_default_library", + "//vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-03-01/compute:go_default_library", "//vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2017-09-01/network:go_default_library", "//vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2017-10-01/storage:go_default_library", "//vendor/github.com/Azure/go-autorest/autorest:go_default_library", diff --git a/pkg/cloudprovider/providers/azure/azure_backoff.go b/pkg/cloudprovider/providers/azure/azure_backoff.go index 6e37916497fa1..fbe0e41c601f0 100644 --- a/pkg/cloudprovider/providers/azure/azure_backoff.go +++ b/pkg/cloudprovider/providers/azure/azure_backoff.go @@ -21,7 +21,7 @@ import ( "fmt" "net/http" - "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-04-01/compute" + "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-03-01/compute" 
"github.com/Azure/azure-sdk-for-go/services/network/mgmt/2017-09-01/network" "github.com/golang/glog" diff --git a/pkg/cloudprovider/providers/azure/azure_client.go b/pkg/cloudprovider/providers/azure/azure_client.go index 19d5e7a158ae2..e76a0854e8144 100644 --- a/pkg/cloudprovider/providers/azure/azure_client.go +++ b/pkg/cloudprovider/providers/azure/azure_client.go @@ -22,7 +22,7 @@ import ( "net/http" "time" - "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-04-01/compute" + "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-03-01/compute" "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2017-09-01/network" "github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2017-10-01/storage" "github.com/Azure/go-autorest/autorest" diff --git a/pkg/cloudprovider/providers/azure/azure_controller_common.go b/pkg/cloudprovider/providers/azure/azure_controller_common.go index 1e983fba6daad..d52d2555913a3 100644 --- a/pkg/cloudprovider/providers/azure/azure_controller_common.go +++ b/pkg/cloudprovider/providers/azure/azure_controller_common.go @@ -20,7 +20,7 @@ import ( "fmt" "time" - "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-04-01/compute" + "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-03-01/compute" "github.com/golang/glog" "k8s.io/apimachinery/pkg/types" diff --git a/pkg/cloudprovider/providers/azure/azure_controller_standard.go b/pkg/cloudprovider/providers/azure/azure_controller_standard.go index 604cfa5d4eb67..90c68da099802 100644 --- a/pkg/cloudprovider/providers/azure/azure_controller_standard.go +++ b/pkg/cloudprovider/providers/azure/azure_controller_standard.go @@ -20,7 +20,7 @@ import ( "fmt" "strings" - "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-04-01/compute" + "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-03-01/compute" "github.com/golang/glog" "k8s.io/apimachinery/pkg/types" diff --git a/pkg/cloudprovider/providers/azure/azure_controller_vmss.go 
b/pkg/cloudprovider/providers/azure/azure_controller_vmss.go index 219f9a4d87adf..485344641c424 100644 --- a/pkg/cloudprovider/providers/azure/azure_controller_vmss.go +++ b/pkg/cloudprovider/providers/azure/azure_controller_vmss.go @@ -20,7 +20,7 @@ import ( "fmt" "strings" - "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-04-01/compute" + "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-03-01/compute" "github.com/golang/glog" "k8s.io/apimachinery/pkg/types" diff --git a/pkg/cloudprovider/providers/azure/azure_fakes.go b/pkg/cloudprovider/providers/azure/azure_fakes.go index 6d51fc56ea920..0e63e5e431129 100644 --- a/pkg/cloudprovider/providers/azure/azure_fakes.go +++ b/pkg/cloudprovider/providers/azure/azure_fakes.go @@ -29,7 +29,7 @@ import ( "k8s.io/apimachinery/pkg/types" "k8s.io/kubernetes/pkg/cloudprovider" - "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-04-01/compute" + "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-03-01/compute" "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2017-09-01/network" "github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2017-10-01/storage" "github.com/Azure/go-autorest/autorest" diff --git a/pkg/cloudprovider/providers/azure/azure_instances_test.go b/pkg/cloudprovider/providers/azure/azure_instances_test.go index 4eaf840348e60..71f8114a3d464 100644 --- a/pkg/cloudprovider/providers/azure/azure_instances_test.go +++ b/pkg/cloudprovider/providers/azure/azure_instances_test.go @@ -24,7 +24,7 @@ import ( "reflect" "testing" - "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-04-01/compute" + "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-03-01/compute" "github.com/Azure/go-autorest/autorest/to" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" diff --git a/pkg/cloudprovider/providers/azure/azure_managedDiskController.go b/pkg/cloudprovider/providers/azure/azure_managedDiskController.go index 89686b10a30b5..ea23708c9cc50 100644 --- 
a/pkg/cloudprovider/providers/azure/azure_managedDiskController.go +++ b/pkg/cloudprovider/providers/azure/azure_managedDiskController.go @@ -23,7 +23,7 @@ import ( "strconv" "strings" - "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-04-01/compute" + "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-03-01/compute" "github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2017-10-01/storage" "github.com/golang/glog" @@ -88,7 +88,7 @@ func (c *ManagedDiskController) CreateManagedDisk(options *ManagedDiskOptions) ( Tags: newTags, Zones: createZones, Sku: &compute.DiskSku{ - Name: compute.StorageAccountTypes(options.StorageAccountType), + Name: compute.DiskStorageAccountTypes(options.StorageAccountType), }, DiskProperties: &compute.DiskProperties{ DiskSizeGB: &diskSizeGB, diff --git a/pkg/cloudprovider/providers/azure/azure_standard.go b/pkg/cloudprovider/providers/azure/azure_standard.go index d408a6af18386..bcfa50c91318b 100644 --- a/pkg/cloudprovider/providers/azure/azure_standard.go +++ b/pkg/cloudprovider/providers/azure/azure_standard.go @@ -29,7 +29,7 @@ import ( "k8s.io/api/core/v1" "k8s.io/kubernetes/pkg/cloudprovider" - "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-04-01/compute" + "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-03-01/compute" "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2017-09-01/network" "github.com/Azure/go-autorest/autorest/to" "github.com/golang/glog" diff --git a/pkg/cloudprovider/providers/azure/azure_test.go b/pkg/cloudprovider/providers/azure/azure_test.go index 876749b7af55c..9e2f57a8a99eb 100644 --- a/pkg/cloudprovider/providers/azure/azure_test.go +++ b/pkg/cloudprovider/providers/azure/azure_test.go @@ -34,7 +34,7 @@ import ( "k8s.io/kubernetes/pkg/cloudprovider/providers/azure/auth" kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis" - "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-04-01/compute" + 
"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-03-01/compute" "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2017-09-01/network" "github.com/Azure/go-autorest/autorest/to" "github.com/stretchr/testify/assert" diff --git a/pkg/cloudprovider/providers/azure/azure_vmsets.go b/pkg/cloudprovider/providers/azure/azure_vmsets.go index c772813aa21b3..b1d924aa83eb0 100644 --- a/pkg/cloudprovider/providers/azure/azure_vmsets.go +++ b/pkg/cloudprovider/providers/azure/azure_vmsets.go @@ -17,7 +17,9 @@ limitations under the License. package azure import ( - "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-04-01/compute" + "net/http" + + "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-03-01/compute" "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2017-09-01/network" "k8s.io/api/core/v1" diff --git a/pkg/cloudprovider/providers/azure/azure_vmss.go b/pkg/cloudprovider/providers/azure/azure_vmss.go index b17bcaff7be36..3233073bfcf98 100644 --- a/pkg/cloudprovider/providers/azure/azure_vmss.go +++ b/pkg/cloudprovider/providers/azure/azure_vmss.go @@ -24,7 +24,7 @@ import ( "strconv" "strings" - "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-04-01/compute" + "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-03-01/compute" "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2017-09-01/network" "github.com/Azure/go-autorest/autorest/to" "github.com/golang/glog" diff --git a/pkg/cloudprovider/providers/azure/azure_vmss_test.go b/pkg/cloudprovider/providers/azure/azure_vmss_test.go index f7d431992d669..e963a854dc8fb 100644 --- a/pkg/cloudprovider/providers/azure/azure_vmss_test.go +++ b/pkg/cloudprovider/providers/azure/azure_vmss_test.go @@ -20,7 +20,7 @@ import ( "fmt" "testing" - "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-04-01/compute" + "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-03-01/compute" 
"github.com/Azure/azure-sdk-for-go/services/network/mgmt/2017-09-01/network" "github.com/Azure/go-autorest/autorest/to" "github.com/stretchr/testify/assert" diff --git a/pkg/cloudprovider/providers/azure/azure_wrap.go b/pkg/cloudprovider/providers/azure/azure_wrap.go index f106dc610f42d..92c0c977993d1 100644 --- a/pkg/cloudprovider/providers/azure/azure_wrap.go +++ b/pkg/cloudprovider/providers/azure/azure_wrap.go @@ -23,7 +23,7 @@ import ( "strings" "time" - "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-04-01/compute" + "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-03-01/compute" "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2017-09-01/network" "github.com/Azure/go-autorest/autorest" "github.com/golang/glog" diff --git a/pkg/volume/azure_dd/BUILD b/pkg/volume/azure_dd/BUILD index 647bb9198026d..2aaa227e48f45 100644 --- a/pkg/volume/azure_dd/BUILD +++ b/pkg/volume/azure_dd/BUILD @@ -39,7 +39,7 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library", "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library", - "//vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-04-01/compute:go_default_library", + "//vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-03-01/compute:go_default_library", "//vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2017-10-01/storage:go_default_library", "//vendor/github.com/golang/glog:go_default_library", ], @@ -75,7 +75,7 @@ go_test( "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", "//staging/src/k8s.io/client-go/util/testing:go_default_library", - "//vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-04-01/compute:go_default_library", + 
"//vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-03-01/compute:go_default_library", "//vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2017-10-01/storage:go_default_library", "//vendor/github.com/Azure/go-autorest/autorest/to:go_default_library", "//vendor/github.com/stretchr/testify/assert:go_default_library", diff --git a/pkg/volume/azure_dd/attacher.go b/pkg/volume/azure_dd/attacher.go index f446201acbb41..419a4838e0cdb 100644 --- a/pkg/volume/azure_dd/attacher.go +++ b/pkg/volume/azure_dd/attacher.go @@ -25,7 +25,7 @@ import ( "strconv" "time" - "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-04-01/compute" + "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-03-01/compute" "github.com/golang/glog" "k8s.io/api/core/v1" diff --git a/pkg/volume/azure_dd/azure_dd.go b/pkg/volume/azure_dd/azure_dd.go index 5dfe7ff711285..7c58808b03c96 100644 --- a/pkg/volume/azure_dd/azure_dd.go +++ b/pkg/volume/azure_dd/azure_dd.go @@ -22,7 +22,7 @@ import ( "strings" "time" - "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-04-01/compute" + "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-03-01/compute" "github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2017-10-01/storage" "github.com/golang/glog" @@ -190,7 +190,7 @@ func getMaxDataDiskCount(instanceType string, sizeList *[]compute.VirtualMachine continue } if strings.ToUpper(*size.Name) == vmsize { - glog.V(12).Infof("got a matching size in getMaxDataDiskCount, Name: %s, MaxDataDiskCount: %s", *size.Name, *size.MaxDataDiskCount) + glog.V(12).Infof("got a matching size in getMaxDataDiskCount, Name: %s, MaxDataDiskCount: %d", *size.Name, *size.MaxDataDiskCount) return int64(*size.MaxDataDiskCount) } } diff --git a/pkg/volume/azure_dd/azure_dd_test.go b/pkg/volume/azure_dd/azure_dd_test.go index fa97c4951e42b..dfe922872e12b 100644 --- a/pkg/volume/azure_dd/azure_dd_test.go +++ b/pkg/volume/azure_dd/azure_dd_test.go @@ -20,7 +20,7 @@ import 
( "os" "testing" - "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-04-01/compute" + "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-03-01/compute" "github.com/Azure/go-autorest/autorest/to" "github.com/stretchr/testify/assert" From d36d383a8cb513bf2c83f64fbc4582f8ca6188bc Mon Sep 17 00:00:00 2001 From: Pengfei Ni Date: Mon, 15 Apr 2019 17:09:27 +0800 Subject: [PATCH 72/96] Replace vmss update API with instance-level update API --- .../providers/azure/azure_client.go | 25 - .../providers/azure/azure_fakes.go | 6 +- .../providers/azure/azure_loadbalancer.go | 6 +- .../providers/azure/azure_standard.go | 16 +- .../providers/azure/azure_vmsets.go | 9 +- .../providers/azure/azure_vmss.go | 456 +++++++++--------- 6 files changed, 240 insertions(+), 278 deletions(-) diff --git a/pkg/cloudprovider/providers/azure/azure_client.go b/pkg/cloudprovider/providers/azure/azure_client.go index e76a0854e8144..e4f2cac1894ae 100644 --- a/pkg/cloudprovider/providers/azure/azure_client.go +++ b/pkg/cloudprovider/providers/azure/azure_client.go @@ -89,7 +89,6 @@ type SecurityGroupsClient interface { // VirtualMachineScaleSetsClient defines needed functions for azure compute.VirtualMachineScaleSetsClient type VirtualMachineScaleSetsClient interface { - CreateOrUpdate(ctx context.Context, resourceGroupName string, VMScaleSetName string, parameters compute.VirtualMachineScaleSet) (resp *http.Response, err error) Get(ctx context.Context, resourceGroupName string, VMScaleSetName string) (result compute.VirtualMachineScaleSet, err error) List(ctx context.Context, resourceGroupName string) (result []compute.VirtualMachineScaleSet, err error) UpdateInstances(ctx context.Context, resourceGroupName string, VMScaleSetName string, VMInstanceIDs compute.VirtualMachineScaleSetVMInstanceRequiredIDs) (resp *http.Response, err error) @@ -803,30 +802,6 @@ func newAzVirtualMachineScaleSetsClient(config *azClientConfig) *azVirtualMachin } } -func (az 
*azVirtualMachineScaleSetsClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, VMScaleSetName string, parameters compute.VirtualMachineScaleSet) (resp *http.Response, err error) { - /* Write rate limiting */ - if !az.rateLimiterWriter.TryAccept() { - err = createRateLimitErr(true, "VMSSCreateOrUpdate") - return - } - - glog.V(10).Infof("azVirtualMachineScaleSetsClient.CreateOrUpdate(%q,%q): start", resourceGroupName, VMScaleSetName) - defer func() { - glog.V(10).Infof("azVirtualMachineScaleSetsClient.CreateOrUpdate(%q,%q): end", resourceGroupName, VMScaleSetName) - }() - - mc := newMetricContext("vmss", "create_or_update", resourceGroupName, az.client.SubscriptionID) - future, err := az.client.CreateOrUpdate(ctx, resourceGroupName, VMScaleSetName, parameters) - mc.Observe(err) - if err != nil { - return future.Response(), err - } - - err = future.WaitForCompletionRef(ctx, az.client.Client) - mc.Observe(err) - return future.Response(), err -} - func (az *azVirtualMachineScaleSetsClient) Get(ctx context.Context, resourceGroupName string, VMScaleSetName string) (result compute.VirtualMachineScaleSet, err error) { if !az.rateLimiterReader.TryAccept() { err = createRateLimitErr(false, "VMSSGet") diff --git a/pkg/cloudprovider/providers/azure/azure_fakes.go b/pkg/cloudprovider/providers/azure/azure_fakes.go index 0e63e5e431129..10c4192a5361c 100644 --- a/pkg/cloudprovider/providers/azure/azure_fakes.go +++ b/pkg/cloudprovider/providers/azure/azure_fakes.go @@ -902,7 +902,11 @@ func (f *fakeVMSet) EnsureHostsInPool(service *v1.Service, nodes []*v1.Node, bac return fmt.Errorf("unimplemented") } -func (f *fakeVMSet) EnsureBackendPoolDeleted(service *v1.Service, poolID, vmSetName string, backendAddressPools *[]network.BackendAddressPool) error { +func (f *fakeVMSet) EnsureHostInPool(service *v1.Service, nodeName types.NodeName, backendPoolID string, vmSetName string, isInternal bool) error { + return fmt.Errorf("unimplemented") +} + +func (f *fakeVMSet) 
EnsureBackendPoolDeleted(service *v1.Service, backendPoolID, vmSetName string, backendAddressPools *[]network.BackendAddressPool) error { return fmt.Errorf("unimplemented") } diff --git a/pkg/cloudprovider/providers/azure/azure_loadbalancer.go b/pkg/cloudprovider/providers/azure/azure_loadbalancer.go index 52c5ba2957e52..ff2c3e53bec1f 100644 --- a/pkg/cloudprovider/providers/azure/azure_loadbalancer.go +++ b/pkg/cloudprovider/providers/azure/azure_loadbalancer.go @@ -829,13 +829,13 @@ func (az *Cloud) reconcileLoadBalancer(clusterName string, service *v1.Service, // Remove backend pools from vmSets. This is required for virtual machine scale sets before removing the LB. vmSetName := az.mapLoadBalancerNameToVMSet(lbName, clusterName) - glog.V(10).Infof("EnsureBackendPoolDeleted(%s, %s): start", lbBackendPoolID, vmSetName) + glog.V(10).Infof("EnsureBackendPoolDeleted(%s,%s) for service %s: start", lbBackendPoolID, vmSetName, serviceName) err := az.vmSet.EnsureBackendPoolDeleted(service, lbBackendPoolID, vmSetName, lb.BackendAddressPools) if err != nil { - glog.Errorf("EnsureBackendPoolDeleted(%s, %s) failed: %v", lbBackendPoolID, vmSetName, err) + glog.Errorf("EnsureBackendPoolDeleted(%s) for service %s failed: %v", lbBackendPoolID, serviceName, err) return nil, err } - glog.V(10).Infof("EnsureBackendPoolDeleted(%s, %s): end", lbBackendPoolID, vmSetName) + glog.V(10).Infof("EnsureBackendPoolDeleted(%s) for service %s: end", lbBackendPoolID, serviceName) // Remove the LB. 
glog.V(10).Infof("reconcileLoadBalancer: az.DeleteLBWithRetry(%q): start", lbName) diff --git a/pkg/cloudprovider/providers/azure/azure_standard.go b/pkg/cloudprovider/providers/azure/azure_standard.go index bcfa50c91318b..5e16df9aeab78 100644 --- a/pkg/cloudprovider/providers/azure/azure_standard.go +++ b/pkg/cloudprovider/providers/azure/azure_standard.go @@ -629,24 +629,24 @@ func (as *availabilitySet) getPrimaryInterfaceWithVMSet(nodeName, vmSetName stri return nic, nil } -// ensureHostInPool ensures the given VM's Primary NIC's Primary IP Configuration is +// EnsureHostInPool ensures the given VM's Primary NIC's Primary IP Configuration is // participating in the specified LoadBalancer Backend Pool. -func (as *availabilitySet) ensureHostInPool(service *v1.Service, nodeName types.NodeName, backendPoolID string, vmSetName string, isInternal bool) error { +func (as *availabilitySet) EnsureHostInPool(service *v1.Service, nodeName types.NodeName, backendPoolID string, vmSetName string, isInternal bool) error { vmName := mapNodeNameToVMName(nodeName) serviceName := getServiceName(service) nic, err := as.getPrimaryInterfaceWithVMSet(vmName, vmSetName) if err != nil { if err == errNotInVMSet { - glog.V(3).Infof("ensureHostInPool skips node %s because it is not in the vmSet %s", nodeName, vmSetName) + glog.V(3).Infof("EnsureHostInPool skips node %s because it is not in the vmSet %s", nodeName, vmSetName) return nil } - glog.Errorf("error: az.ensureHostInPool(%s), az.vmSet.GetPrimaryInterface.Get(%s, %s), err=%v", nodeName, vmName, vmSetName, err) + glog.Errorf("error: az.EnsureHostInPool(%s), az.vmSet.GetPrimaryInterface.Get(%s, %s), err=%v", nodeName, vmName, vmSetName, err) return err } if nic.ProvisioningState != nil && *nic.ProvisioningState == nicFailedState { - glog.V(3).Infof("ensureHostInPool skips node %s because its primary nic %s is in Failed state", nodeName, *nic.Name) + glog.Warningf("EnsureHostInPool skips node %s because its primary nic %s is in Failed 
state", nodeName, *nic.Name) return nil } @@ -734,7 +734,7 @@ func (as *availabilitySet) EnsureHostsInPool(service *v1.Service, nodes []*v1.No } f := func() error { - err := as.ensureHostInPool(service, types.NodeName(localNodeName), backendPoolID, vmSetName, isInternal) + err := as.EnsureHostInPool(service, types.NodeName(localNodeName), backendPoolID, vmSetName, isInternal) if err != nil { return fmt.Errorf("ensure(%s): backendPoolID(%s) - failed to ensure host in pool: %q", getServiceName(service), backendPoolID, err) } @@ -751,8 +751,8 @@ func (as *availabilitySet) EnsureHostsInPool(service *v1.Service, nodes []*v1.No return nil } -// EnsureBackendPoolDeleted ensures the loadBalancer backendAddressPools deleted from the specified vmSet. -func (as *availabilitySet) EnsureBackendPoolDeleted(service *v1.Service, poolID, vmSetName string, backendAddressPools *[]network.BackendAddressPool) error { +// EnsureBackendPoolDeleted ensures the loadBalancer backendAddressPools deleted from the specified nodes. +func (as *availabilitySet) EnsureBackendPoolDeleted(service *v1.Service, backendPoolID, vmSetName string, backendAddressPools *[]network.BackendAddressPool) error { // Do nothing for availability set. return nil } diff --git a/pkg/cloudprovider/providers/azure/azure_vmsets.go b/pkg/cloudprovider/providers/azure/azure_vmsets.go index b1d924aa83eb0..93686702f5c80 100644 --- a/pkg/cloudprovider/providers/azure/azure_vmsets.go +++ b/pkg/cloudprovider/providers/azure/azure_vmsets.go @@ -17,8 +17,6 @@ limitations under the License. package azure import ( - "net/http" - "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-03-01/compute" "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2017-09-01/network" @@ -57,8 +55,11 @@ type VMSet interface { // EnsureHostsInPool ensures the given Node's primary IP configurations are // participating in the specified LoadBalancer Backend Pool. 
EnsureHostsInPool(service *v1.Service, nodes []*v1.Node, backendPoolID string, vmSetName string, isInternal bool) error - // EnsureBackendPoolDeleted ensures the loadBalancer backendAddressPools deleted from the specified vmSet. - EnsureBackendPoolDeleted(service *v1.Service, poolID, vmSetName string, backendAddressPools *[]network.BackendAddressPool) error + // EnsureHostInPool ensures the given VM's Primary NIC's Primary IP Configuration is + // participating in the specified LoadBalancer Backend Pool. + EnsureHostInPool(service *v1.Service, nodeName types.NodeName, backendPoolID string, vmSetName string, isInternal bool) error + // EnsureBackendPoolDeleted ensures the loadBalancer backendAddressPools deleted from the specified nodes. + EnsureBackendPoolDeleted(service *v1.Service, backendPoolID, vmSetName string, backendAddressPools *[]network.BackendAddressPool) error // AttachDisk attaches a vhd to vm. The vhd must exist, can be identified by diskName, diskURI, and lun. AttachDisk(isManagedDisk bool, diskName, diskURI string, nodeName types.NodeName, lun int32, cachingMode compute.CachingTypes) error diff --git a/pkg/cloudprovider/providers/azure/azure_vmss.go b/pkg/cloudprovider/providers/azure/azure_vmss.go index 3233073bfcf98..8091e057db9a0 100644 --- a/pkg/cloudprovider/providers/azure/azure_vmss.go +++ b/pkg/cloudprovider/providers/azure/azure_vmss.go @@ -31,7 +31,7 @@ import ( "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/util/sets" + utilerrors "k8s.io/apimachinery/pkg/util/errors" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/kubernetes/pkg/cloudprovider" ) @@ -44,6 +44,7 @@ var ( resourceGroupRE = regexp.MustCompile(`.*/subscriptions/(?:.*)/resourceGroups/(.+)/providers/Microsoft.Compute/virtualMachineScaleSets/(?:.*)/virtualMachines(?:.*)`) vmssNicResourceGroupRE = 
regexp.MustCompile(`.*/subscriptions/(?:.*)/resourceGroups/(.+)/providers/Microsoft.Compute/virtualMachineScaleSets/(?:.*)/virtualMachines/(?:.*)/networkInterfaces/(?:.*)`) vmssMachineIDTemplate = "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Compute/virtualMachineScaleSets/%s/virtualMachines/%s" + vmssIPConfigurationRE = regexp.MustCompile(`.*/subscriptions/(?:.*)/resourceGroups/(.+)/providers/Microsoft.Compute/virtualMachineScaleSets/(.+)/virtualMachines/(.+)/networkInterfaces(?:.*)`) ) // scaleSet implements VMSet interface for Azure scale set. @@ -345,6 +346,16 @@ func (ss *scaleSet) getPrimaryInterfaceID(machine compute.VirtualMachineScaleSet return "", fmt.Errorf("failed to find a primary nic for the vm. vmname=%q", *machine.Name) } +// getVmssMachineID returns the full identifier of a vmss virtual machine. +func (az *Cloud) getVmssMachineID(resourceGroup, scaleSetName, instanceID string) string { + return fmt.Sprintf( + vmssMachineIDTemplate, + az.SubscriptionID, + strings.ToLower(resourceGroup), + scaleSetName, + instanceID) +} + // machineName is composed of computerNamePrefix and 36-based instanceID. // And instanceID part if in fixed length of 6 characters. // Refer https://msftstack.wordpress.com/2017/05/10/figuring-out-azure-vm-scale-set-machine-names/. @@ -588,9 +599,8 @@ func (ss *scaleSet) getScaleSetWithRetry(service *v1.Service, name string) (comp return result, exists, err } -// getPrimaryNetworkConfiguration gets primary network interface configuration for scale sets. -func (ss *scaleSet) getPrimaryNetworkConfiguration(networkConfigurationList *[]compute.VirtualMachineScaleSetNetworkConfiguration, scaleSetName string) (*compute.VirtualMachineScaleSetNetworkConfiguration, error) { - networkConfigurations := *networkConfigurationList +// getPrimarynetworkInterfaceConfiguration gets primary network interface configuration for scale set virtual machine. 
+func (ss *scaleSet) getPrimarynetworkInterfaceConfiguration(networkConfigurations []compute.VirtualMachineScaleSetNetworkConfiguration, nodeName string) (*compute.VirtualMachineScaleSetNetworkConfiguration, error) { if len(networkConfigurations) == 1 { return &networkConfigurations[0], nil } @@ -602,10 +612,10 @@ func (ss *scaleSet) getPrimaryNetworkConfiguration(networkConfigurationList *[]c } } - return nil, fmt.Errorf("failed to find a primary network configuration for the scale set %q", scaleSetName) + return nil, fmt.Errorf("failed to find a primary network configuration for the scale set VM %q", nodeName) } -func (ss *scaleSet) getPrimaryIPConfigForScaleSet(config *compute.VirtualMachineScaleSetNetworkConfiguration, scaleSetName string) (*compute.VirtualMachineScaleSetIPConfiguration, error) { +func (ss *scaleSet) getPrimaryIPConfigForScaleSet(config *compute.VirtualMachineScaleSetNetworkConfiguration, nodeName string) (*compute.VirtualMachineScaleSetIPConfiguration, error) { ipConfigurations := *config.IPConfigurations if len(ipConfigurations) == 1 { return &ipConfigurations[0], nil @@ -618,18 +628,7 @@ func (ss *scaleSet) getPrimaryIPConfigForScaleSet(config *compute.VirtualMachine } } - return nil, fmt.Errorf("failed to find a primary IP configuration for the scale set %q", scaleSetName) -} - -// createOrUpdateVMSSWithRetry invokes ss.VirtualMachineScaleSetsClient.CreateOrUpdate with exponential backoff retry. 
-func (ss *scaleSet) createOrUpdateVMSSWithRetry(service *v1.Service, virtualMachineScaleSet compute.VirtualMachineScaleSet) error { - return wait.ExponentialBackoff(ss.requestBackoff(), func() (bool, error) { - ctx, cancel := getContextWithCancel() - defer cancel() - resp, err := ss.VirtualMachineScaleSetsClient.CreateOrUpdate(ctx, ss.ResourceGroup, *virtualMachineScaleSet.Name, virtualMachineScaleSet) - glog.V(10).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate(%s): end", *virtualMachineScaleSet.Name) - return ss.processHTTPRetryResponse(service, "CreateOrUpdateVMSS", resp, err) - }) + return nil, fmt.Errorf("failed to find a primary IP configuration for the scale set VM %q", nodeName) } // updateVMSSInstancesWithRetry invokes ss.VirtualMachineScaleSetsClient.UpdateInstances with exponential backoff retry. @@ -643,70 +642,35 @@ func (ss *scaleSet) updateVMSSInstancesWithRetry(service *v1.Service, scaleSetNa }) } -// getNodesScaleSets returns scalesets with instanceIDs and standard node names for given nodes. 
-func (ss *scaleSet) getNodesScaleSets(nodes []*v1.Node) (map[string]sets.String, []*v1.Node, error) { - scalesets := make(map[string]sets.String) - standardNodes := []*v1.Node{} - - for _, curNode := range nodes { - if ss.useStandardLoadBalancer() && ss.excludeMasterNodesFromStandardLB() && isMasterNode(curNode) { - glog.V(4).Infof("Excluding master node %q from load balancer backendpool", curNode.Name) - continue - } - - if ss.ShouldNodeExcludedFromLoadBalancer(curNode) { - glog.V(4).Infof("Excluding unmanaged/external-resource-group node %q", curNode.Name) - continue - } - - curScaleSetName, err := extractScaleSetNameByProviderID(curNode.Spec.ProviderID) - if err != nil { - glog.V(4).Infof("Node %q is not belonging to any scale sets, assuming it is belong to availability sets", curNode.Name) - standardNodes = append(standardNodes, curNode) - continue - } - - if _, ok := scalesets[curScaleSetName]; !ok { - scalesets[curScaleSetName] = sets.NewString() - } - - instanceID, err := getLastSegment(curNode.Spec.ProviderID) - if err != nil { - glog.Errorf("Failed to get instance ID for node %q: %v", curNode.Spec.ProviderID, err) - return nil, nil, err - } - - scalesets[curScaleSetName].Insert(instanceID) - } - - return scalesets, standardNodes, nil -} - -// ensureHostsInVMSetPool ensures the given Node's primary IP configurations are -// participating in the vmSet's LoadBalancer Backend Pool. -func (ss *scaleSet) ensureHostsInVMSetPool(service *v1.Service, backendPoolID string, vmSetName string, instanceIDs []string, isInternal bool) error { - glog.V(3).Infof("ensuring hosts %q of scaleset %q in LB backendpool %q", instanceIDs, vmSetName, backendPoolID) - serviceName := getServiceName(service) - virtualMachineScaleSet, exists, err := ss.getScaleSetWithRetry(service, vmSetName) +// EnsureHostInPool ensures the given VM's Primary NIC's Primary IP Configuration is +// participating in the specified LoadBalancer Backend Pool. 
+func (ss *scaleSet) EnsureHostInPool(service *v1.Service, nodeName types.NodeName, backendPoolID string, vmSetName string, isInternal bool) error { + klog.V(3).Infof("ensuring node %q of scaleset %q in LB backendpool %q", nodeName, vmSetName, backendPoolID) + vmName := mapNodeNameToVMName(nodeName) + ssName, instanceID, vm, err := ss.getVmssVM(vmName) if err != nil { - glog.Errorf("ss.getScaleSetWithRetry(%s) for service %q failed: %v", vmSetName, serviceName, err) return err } - if !exists { - errorMessage := fmt.Errorf("Scale set %q not found", vmSetName) - glog.Errorf("%v", errorMessage) - return errorMessage + + // Check scale set name: + // - For basic SKU load balancer, errNotInVMSet should be returned if the node's + // scale set is mismatched with vmSetName. + // - For standard SKU load balancer, backend could belong to multiple VMSS, so we + // don't check vmSet for it. + if vmSetName != "" && !ss.useStandardLoadBalancer() && !strings.EqualFold(vmSetName, ssName) { + klog.V(3).Infof("EnsureHostInPool skips node %s because it is not in the scaleSet %s", vmName, vmSetName) + return nil } // Find primary network interface configuration. - networkConfigureList := virtualMachineScaleSet.VirtualMachineProfile.NetworkProfile.NetworkInterfaceConfigurations - primaryNetworkConfiguration, err := ss.getPrimaryNetworkConfiguration(networkConfigureList, vmSetName) + networkInterfaceConfigurations := *vm.NetworkProfileConfiguration.NetworkInterfaceConfigurations + primaryNetworkInterfaceConfiguration, err := ss.getPrimarynetworkInterfaceConfiguration(networkInterfaceConfigurations, vmName) if err != nil { return err } // Find primary IP configuration. 
- primaryIPConfiguration, err := ss.getPrimaryIPConfigForScaleSet(primaryNetworkConfiguration, vmSetName) + primaryIPConfiguration, err := ss.getPrimaryIPConfigForScaleSet(primaryNetworkInterfaceConfiguration, vmName) if err != nil { return err } @@ -723,270 +687,288 @@ func (ss *scaleSet) ensureHostsInVMSetPool(service *v1.Service, backendPoolID st break } } - if !foundPool { - if ss.useStandardLoadBalancer() && len(newBackendPools) > 0 { - // Although standard load balancer supports backends from multiple vmss, - // the same network interface couldn't be added to more than one load balancer of - // the same type. Omit those nodes (e.g. masters) so Azure ARM won't complain - // about this. - newBackendPoolsIDs := make([]string, 0, len(newBackendPools)) - for _, pool := range newBackendPools { - if pool.ID != nil { - newBackendPoolsIDs = append(newBackendPoolsIDs, *pool.ID) - } - } - isSameLB, oldLBName, err := isBackendPoolOnSameLB(backendPoolID, newBackendPoolsIDs) - if err != nil { - return err - } - if !isSameLB { - glog.V(4).Infof("VMSS %q has already been added to LB %q, omit adding it to a new one", vmSetName, oldLBName) - return nil - } - } - newBackendPools = append(newBackendPools, - compute.SubResource{ - ID: to.StringPtr(backendPoolID), - }) - primaryIPConfiguration.LoadBalancerBackendAddressPools = &newBackendPools + // The backendPoolID has already been found from existing LoadBalancerBackendAddressPools. 
+ if foundPool { + return nil + } - ctx, cancel := getContextWithCancel() - defer cancel() - glog.V(3).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate for service (%s): scale set (%s) - updating", serviceName, vmSetName) - resp, err := ss.VirtualMachineScaleSetsClient.CreateOrUpdate(ctx, ss.ResourceGroup, vmSetName, virtualMachineScaleSet) - glog.V(10).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate(%q): end", vmSetName) - if ss.CloudProviderBackoff && shouldRetryHTTPRequest(resp, err) { - glog.V(2).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate for service (%s): scale set (%s) - updating, err=%v", serviceName, vmSetName, err) - retryErr := ss.createOrUpdateVMSSWithRetry(service, virtualMachineScaleSet) - if retryErr != nil { - err = retryErr - glog.V(2).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate for service (%s) abort backoff: scale set (%s) - updating", serviceName, vmSetName) + if ss.useStandardLoadBalancer() && len(newBackendPools) > 0 { + // Although standard load balancer supports backends from multiple scale + // sets, the same network interface couldn't be added to more than one load balancer of + // the same type. Omit those nodes (e.g. masters) so Azure ARM won't complain + // about this. + newBackendPoolsIDs := make([]string, 0, len(newBackendPools)) + for _, pool := range newBackendPools { + if pool.ID != nil { + newBackendPoolsIDs = append(newBackendPoolsIDs, *pool.ID) } } + isSameLB, oldLBName, err := isBackendPoolOnSameLB(backendPoolID, newBackendPoolsIDs) if err != nil { return err } + if !isSameLB { + klog.V(4).Infof("Node %q has already been added to LB %q, omit adding it to a new one", nodeName, oldLBName) + return nil + } } - // Update instances to latest VMSS model. - vmInstanceIDs := compute.VirtualMachineScaleSetVMInstanceRequiredIDs{ - InstanceIds: &instanceIDs, + // Compose a new vmssVM with added backendPoolID. 
+ newBackendPools = append(newBackendPools, + compute.SubResource{ + ID: to.StringPtr(backendPoolID), + }) + primaryIPConfiguration.LoadBalancerBackendAddressPools = &newBackendPools + newVM := compute.VirtualMachineScaleSetVM{ + Sku: vm.Sku, + Location: vm.Location, + VirtualMachineScaleSetVMProperties: &compute.VirtualMachineScaleSetVMProperties{ + HardwareProfile: vm.HardwareProfile, + NetworkProfileConfiguration: &compute.VirtualMachineScaleSetVMNetworkProfileConfiguration{ + NetworkInterfaceConfigurations: &networkInterfaceConfigurations, + }, + }, } + + // Get the node resource group. + nodeResourceGroup, err := ss.GetNodeResourceGroup(vmName) + if err != nil { + return err + } + + // Invalidate the cache since we would update it. + key := buildVmssCacheKey(nodeResourceGroup, ss.makeVmssVMName(ssName, instanceID)) + defer ss.vmssVMCache.Delete(key) + + // Update vmssVM with backoff. ctx, cancel := getContextWithCancel() defer cancel() - instanceResp, err := ss.VirtualMachineScaleSetsClient.UpdateInstances(ctx, ss.ResourceGroup, vmSetName, vmInstanceIDs) - glog.V(10).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate(%q): end", vmSetName) - if ss.CloudProviderBackoff && shouldRetryHTTPRequest(instanceResp, err) { - glog.V(2).Infof("VirtualMachineScaleSetsClient.UpdateInstances for service (%s): scale set (%s) - updating, err=%v", serviceName, vmSetName, err) - retryErr := ss.updateVMSSInstancesWithRetry(service, vmSetName, vmInstanceIDs) + klog.V(2).Infof("EnsureHostInPool begins to update vmssVM(%s) with new backendPoolID %s", vmName, backendPoolID) + resp, err := ss.VirtualMachineScaleSetVMsClient.Update(ctx, nodeResourceGroup, ssName, instanceID, newVM) + if ss.CloudProviderBackoff && shouldRetryHTTPRequest(resp, err) { + klog.V(2).Infof("EnsureHostInPool update backing off vmssVM(%s) with new backendPoolID %s, err: %v", vmName, backendPoolID, err) + retryErr := ss.UpdateVmssVMWithRetry(nodeResourceGroup, ssName, instanceID, newVM) if retryErr != nil { err 
= retryErr - glog.V(2).Infof("VirtualMachineScaleSetsClient.UpdateInstances for service (%s) abort backoff: scale set (%s) - updating", serviceName, vmSetName) + klog.Errorf("EnsureHostInPool update abort backoff vmssVM(%s) with new backendPoolID %s, err: %v", vmName, backendPoolID, err) } } - if err != nil { - return err - } - return nil + return err } // EnsureHostsInPool ensures the given Node's primary IP configurations are // participating in the specified LoadBalancer Backend Pool. func (ss *scaleSet) EnsureHostsInPool(service *v1.Service, nodes []*v1.Node, backendPoolID string, vmSetName string, isInternal bool) error { - serviceName := getServiceName(service) - scalesets, standardNodes, err := ss.getNodesScaleSets(nodes) - if err != nil { - glog.Errorf("getNodesScaleSets() for service %q failed: %v", serviceName, err) - return err - } + hostUpdates := make([]func() error, 0, len(nodes)) + for _, node := range nodes { + localNodeName := node.Name - for ssName, instanceIDs := range scalesets { - // Only add nodes belonging to specified vmSet for basic SKU LB. - if !ss.useStandardLoadBalancer() && !strings.EqualFold(ssName, vmSetName) { + if ss.useStandardLoadBalancer() && ss.excludeMasterNodesFromStandardLB() && isMasterNode(node) { + klog.V(4).Infof("Excluding master node %q from load balancer backendpool %q", localNodeName, backendPoolID) continue } - if instanceIDs.Len() == 0 { - // This may happen when scaling a vmss capacity to 0. - glog.V(3).Infof("scale set %q has 0 nodes, adding it to load balancer anyway", ssName) - // InstanceIDs is required to update vmss, use * instead here since there are no nodes actually. 
- instanceIDs.Insert("*") + if ss.ShouldNodeExcludedFromLoadBalancer(node) { + klog.V(4).Infof("Excluding unmanaged/external-resource-group node %q", localNodeName) + continue } - err := ss.ensureHostsInVMSetPool(service, backendPoolID, ssName, instanceIDs.List(), isInternal) - if err != nil { - glog.Errorf("ensureHostsInVMSetPool() with scaleSet %q for service %q failed: %v", ssName, serviceName, err) - return err + f := func() error { + // VMAS nodes should also be added to the SLB backends. + if ss.useStandardLoadBalancer() { + // Check whether the node is VMAS virtual machine. + managedByAS, err := ss.isNodeManagedByAvailabilitySet(localNodeName) + if err != nil { + klog.Errorf("Failed to check isNodeManagedByAvailabilitySet(%s): %v", localNodeName, err) + return err + } + if managedByAS { + return ss.availabilitySet.EnsureHostInPool(service, types.NodeName(localNodeName), backendPoolID, vmSetName, isInternal) + } + } + + err := ss.EnsureHostInPool(service, types.NodeName(localNodeName), backendPoolID, vmSetName, isInternal) + if err != nil { + return fmt.Errorf("EnsureHostInPool(%s): backendPoolID(%s) - failed to ensure host in pool: %q", getServiceName(service), backendPoolID, err) + } + return nil } + hostUpdates = append(hostUpdates, f) } - if ss.useStandardLoadBalancer() && len(standardNodes) > 0 { - err := ss.availabilitySet.EnsureHostsInPool(service, standardNodes, backendPoolID, "", isInternal) - if err != nil { - glog.Errorf("availabilitySet.EnsureHostsInPool() for service %q failed: %v", serviceName, err) - return err - } + errs := utilerrors.AggregateGoroutines(hostUpdates...) + if errs != nil { + return utilerrors.Flatten(errs) } return nil } -// ensureScaleSetBackendPoolDeleted ensures the loadBalancer backendAddressPools deleted from the specified scaleset. 
-func (ss *scaleSet) ensureScaleSetBackendPoolDeleted(service *v1.Service, poolID, ssName string) error { - glog.V(3).Infof("ensuring backend pool %q deleted from scaleset %q", poolID, ssName) - virtualMachineScaleSet, exists, err := ss.getScaleSetWithRetry(service, ssName) +// ensureBackendPoolDeletedFromNode ensures the loadBalancer backendAddressPools deleted from the specified node. +func (ss *scaleSet) ensureBackendPoolDeletedFromNode(service *v1.Service, nodeName, backendPoolID string) error { + ssName, instanceID, vm, err := ss.getVmssVM(nodeName) if err != nil { - glog.Errorf("ss.ensureScaleSetBackendPoolDeleted(%s, %s) getScaleSetWithRetry(%s) failed: %v", poolID, ssName, ssName, err) return err } - if !exists { - glog.V(2).Infof("ss.ensureScaleSetBackendPoolDeleted(%s, %s), scale set %s has already been non-exist", poolID, ssName, ssName) - return nil - } // Find primary network interface configuration. - networkConfigureList := virtualMachineScaleSet.VirtualMachineProfile.NetworkProfile.NetworkInterfaceConfigurations - primaryNetworkConfiguration, err := ss.getPrimaryNetworkConfiguration(networkConfigureList, ssName) + networkInterfaceConfigurations := *vm.NetworkProfileConfiguration.NetworkInterfaceConfigurations + primaryNetworkInterfaceConfiguration, err := ss.getPrimarynetworkInterfaceConfiguration(networkInterfaceConfigurations, nodeName) if err != nil { return err } - // Find primary IP configuration. - primaryIPConfiguration, err := ss.getPrimaryIPConfigForScaleSet(primaryNetworkConfiguration, ssName) + // Find primary IP configuration.4 + primaryIPConfiguration, err := ss.getPrimaryIPConfigForScaleSet(primaryNetworkInterfaceConfiguration, nodeName) if err != nil { return err } - - // Construct new loadBalancerBackendAddressPools and remove backendAddressPools from primary IP configuration. 
if primaryIPConfiguration.LoadBalancerBackendAddressPools == nil || len(*primaryIPConfiguration.LoadBalancerBackendAddressPools) == 0 { return nil } + + // Construct new loadBalancerBackendAddressPools and remove backendAddressPools from primary IP configuration. existingBackendPools := *primaryIPConfiguration.LoadBalancerBackendAddressPools newBackendPools := []compute.SubResource{} foundPool := false for i := len(existingBackendPools) - 1; i >= 0; i-- { curPool := existingBackendPools[i] - if strings.EqualFold(poolID, *curPool.ID) { - glog.V(10).Infof("ensureScaleSetBackendPoolDeleted gets unwanted backend pool %q for scale set %q", poolID, ssName) + if strings.EqualFold(backendPoolID, *curPool.ID) { + klog.V(10).Infof("ensureBackendPoolDeletedFromNode gets unwanted backend pool %q for node %s", backendPoolID, nodeName) foundPool = true newBackendPools = append(existingBackendPools[:i], existingBackendPools[i+1:]...) } } + + // Pool not found, assume it has been already removed. if !foundPool { - // Pool not found, assume it has been already removed. return nil } - // Update scale set with backoff. + // Compose a new vmssVM with added backendPoolID. primaryIPConfiguration.LoadBalancerBackendAddressPools = &newBackendPools - glog.V(3).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate: scale set (%s) - updating", ssName) + newVM := compute.VirtualMachineScaleSetVM{ + Sku: vm.Sku, + Location: vm.Location, + VirtualMachineScaleSetVMProperties: &compute.VirtualMachineScaleSetVMProperties{ + HardwareProfile: vm.HardwareProfile, + NetworkProfileConfiguration: &compute.VirtualMachineScaleSetVMNetworkProfileConfiguration{ + NetworkInterfaceConfigurations: &networkInterfaceConfigurations, + }, + }, + } + + // Get the node resource group. + nodeResourceGroup, err := ss.GetNodeResourceGroup(nodeName) + if err != nil { + return err + } + + // Invalidate the cache since we would update it. 
+ key := buildVmssCacheKey(nodeResourceGroup, ss.makeVmssVMName(ssName, instanceID)) + defer ss.vmssVMCache.Delete(key) + + // Update vmssVM with backoff. ctx, cancel := getContextWithCancel() defer cancel() - resp, err := ss.VirtualMachineScaleSetsClient.CreateOrUpdate(ctx, ss.ResourceGroup, ssName, virtualMachineScaleSet) - glog.V(10).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate(%q): end", ssName) + klog.V(2).Infof("ensureBackendPoolDeletedFromNode begins to update vmssVM(%s) with backendPoolID %s", nodeName, backendPoolID) + resp, err := ss.VirtualMachineScaleSetVMsClient.Update(ctx, nodeResourceGroup, ssName, instanceID, newVM) if ss.CloudProviderBackoff && shouldRetryHTTPRequest(resp, err) { - glog.V(2).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate: scale set (%s) - updating, err=%v", ssName, err) - retryErr := ss.createOrUpdateVMSSWithRetry(service, virtualMachineScaleSet) + klog.V(2).Infof("ensureBackendPoolDeletedFromNode update backing off vmssVM(%s) with backendPoolID %s, err: %v", nodeName, backendPoolID, err) + retryErr := ss.UpdateVmssVMWithRetry(nodeResourceGroup, ssName, instanceID, newVM) if retryErr != nil { err = retryErr - glog.V(2).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate abort backoff: scale set (%s) - updating", ssName) + klog.Errorf("ensureBackendPoolDeletedFromNode update abort backoff vmssVM(%s) with backendPoolID %s, err: %v", nodeName, backendPoolID, err) } } if err != nil { - return err + klog.Errorf("ensureBackendPoolDeletedFromNode failed to update vmssVM(%s) with backendPoolID %s: %v", nodeName, backendPoolID, err) + } else { + klog.V(2).Infof("ensureBackendPoolDeletedFromNode update vmssVM(%s) with backendPoolID %s succeeded", nodeName, backendPoolID) } + return err +} - // Update instances to latest VMSS model. 
- instanceIDs := []string{"*"} - vmInstanceIDs := compute.VirtualMachineScaleSetVMInstanceRequiredIDs{ - InstanceIds: &instanceIDs, - } - instanceCtx, instanceCancel := getContextWithCancel() - defer instanceCancel() - instanceResp, err := ss.VirtualMachineScaleSetsClient.UpdateInstances(instanceCtx, ss.ResourceGroup, ssName, vmInstanceIDs) - glog.V(10).Infof("VirtualMachineScaleSetsClient.UpdateInstances(%q): end", ssName) - if ss.CloudProviderBackoff && shouldRetryHTTPRequest(instanceResp, err) { - glog.V(2).Infof("VirtualMachineScaleSetsClient.UpdateInstances scale set (%s) - updating, err=%v", ssName, err) - retryErr := ss.updateVMSSInstancesWithRetry(service, ssName, vmInstanceIDs) - if retryErr != nil { - err = retryErr - glog.V(2).Infof("VirtualMachineScaleSetsClient.UpdateInstances abort backoff: scale set (%s) - updating", ssName) - } +// getNodeNameByIPConfigurationID gets the node name by IP configuration ID. +func (ss *scaleSet) getNodeNameByIPConfigurationID(ipConfigurationID string) (string, error) { + matches := vmssIPConfigurationRE.FindStringSubmatch(ipConfigurationID) + if len(matches) != 4 { + klog.V(4).Infof("Can not extract scale set name from ipConfigurationID (%s), assuming it is mananaged by availability set", ipConfigurationID) + return "", ErrorNotVmssInstance } + + resourceGroup := matches[1] + scaleSetName := matches[2] + instanceID := matches[3] + vm, err := ss.getVmssVMByInstanceID(resourceGroup, scaleSetName, instanceID) if err != nil { - return err + return "", err } - // Update virtualMachineScaleSet again. This is a workaround for removing VMSS reference from LB. - // TODO: remove this workaround when figuring out the root cause. 
- if len(newBackendPools) == 0 { - updateCtx, updateCancel := getContextWithCancel() - defer updateCancel() - glog.V(3).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate: scale set (%s) - updating second time", ssName) - resp, err = ss.VirtualMachineScaleSetsClient.CreateOrUpdate(updateCtx, ss.ResourceGroup, ssName, virtualMachineScaleSet) - glog.V(10).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate(%q): end", ssName) - if ss.CloudProviderBackoff && shouldRetryHTTPRequest(resp, err) { - glog.V(2).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate: scale set (%s) - updating, err=%v", ssName, err) - retryErr := ss.createOrUpdateVMSSWithRetry(service, virtualMachineScaleSet) - if retryErr != nil { - glog.V(2).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate abort backoff: scale set (%s) - updating", ssName) - } - } + if vm.OsProfile != nil && vm.OsProfile.ComputerName != nil { + return strings.ToLower(*vm.OsProfile.ComputerName), nil } - return nil + return "", nil } -// EnsureBackendPoolDeleted ensures the loadBalancer backendAddressPools deleted from the specified vmSet. -func (ss *scaleSet) EnsureBackendPoolDeleted(service *v1.Service, poolID, vmSetName string, backendAddressPools *[]network.BackendAddressPool) error { +// EnsureBackendPoolDeleted ensures the loadBalancer backendAddressPools deleted from the specified nodes. +func (ss *scaleSet) EnsureBackendPoolDeleted(service *v1.Service, backendPoolID, vmSetName string, backendAddressPools *[]network.BackendAddressPool) error { + // Returns nil if backend address pools already deleted. 
if backendAddressPools == nil { return nil } - scalesets := sets.NewString() + ipConfigurationIDs := []string{} for _, backendPool := range *backendAddressPools { - if strings.EqualFold(*backendPool.ID, poolID) && backendPool.BackendIPConfigurations != nil { - for _, ipConfigurations := range *backendPool.BackendIPConfigurations { - if ipConfigurations.ID == nil { + if strings.EqualFold(*backendPool.ID, backendPoolID) && backendPool.BackendIPConfigurations != nil { + for _, ipConf := range *backendPool.BackendIPConfigurations { + if ipConf.ID == nil { continue } - ssName, err := extractScaleSetNameByProviderID(*ipConfigurations.ID) - if err != nil { - glog.V(4).Infof("backend IP configuration %q is not belonging to any vmss, omit it", *ipConfigurations.ID) - continue - } - - scalesets.Insert(ssName) + ipConfigurationIDs = append(ipConfigurationIDs, *ipConf.ID) } - break } } - for ssName := range scalesets { - // Only remove nodes belonging to specified vmSet to basic LB backends. - if !ss.useStandardLoadBalancer() && !strings.EqualFold(ssName, vmSetName) { - continue - } + hostUpdates := make([]func() error, 0, len(ipConfigurationIDs)) + for i := range ipConfigurationIDs { + ipConfigurationID := ipConfigurationIDs[i] - err := ss.ensureScaleSetBackendPoolDeleted(service, poolID, ssName) - if err != nil { - glog.Errorf("ensureScaleSetBackendPoolDeleted() with scaleSet %q failed: %v", ssName, err) - return err + f := func() error { + if scaleSetName, err := extractScaleSetNameByProviderID(ipConfigurationID); err == nil { + // Only remove nodes belonging to specified vmSet to basic LB backends. + if !ss.useStandardLoadBalancer() && !strings.EqualFold(scaleSetName, vmSetName) { + return nil + } + } + + nodeName, err := ss.getNodeNameByIPConfigurationID(ipConfigurationID) + if err != nil { + if err == ErrorNotVmssInstance { // Do nothing for the VMAS nodes. 
+ return nil + } + klog.Errorf("Failed to getNodeNameByIPConfigurationID(%s): %v", ipConfigurationID, err) + return err + } + + err = ss.ensureBackendPoolDeletedFromNode(service, nodeName, backendPoolID) + if err != nil { + return fmt.Errorf("failed to ensure backend pool %s deleted from node %s: %v", backendPoolID, nodeName, err) + } + + return nil } + hostUpdates = append(hostUpdates, f) } - return nil -} + errs := utilerrors.AggregateGoroutines(hostUpdates...) + if errs != nil { + return utilerrors.Flatten(errs) + } -// getVmssMachineID returns the full identifier of a vmss virtual machine. -func (az *Cloud) getVmssMachineID(resourceGroup, scaleSetName, instanceID string) string { - return fmt.Sprintf( - vmssMachineIDTemplate, - az.SubscriptionID, - resourceGroup, - scaleSetName, - instanceID) + return nil } From f3f44eb93dc3cbf6725543d3978e49d230e78a4e Mon Sep 17 00:00:00 2001 From: Pengfei Ni Date: Tue, 16 Apr 2019 16:49:30 +0800 Subject: [PATCH 73/96] Cleanup codes that not required any more --- .../providers/azure/azure_client.go | 25 ------- .../providers/azure/azure_vmss.go | 75 +++++-------------- 2 files changed, 19 insertions(+), 81 deletions(-) diff --git a/pkg/cloudprovider/providers/azure/azure_client.go b/pkg/cloudprovider/providers/azure/azure_client.go index e4f2cac1894ae..16dbba4d57d9d 100644 --- a/pkg/cloudprovider/providers/azure/azure_client.go +++ b/pkg/cloudprovider/providers/azure/azure_client.go @@ -91,7 +91,6 @@ type SecurityGroupsClient interface { type VirtualMachineScaleSetsClient interface { Get(ctx context.Context, resourceGroupName string, VMScaleSetName string) (result compute.VirtualMachineScaleSet, err error) List(ctx context.Context, resourceGroupName string) (result []compute.VirtualMachineScaleSet, err error) - UpdateInstances(ctx context.Context, resourceGroupName string, VMScaleSetName string, VMInstanceIDs compute.VirtualMachineScaleSetVMInstanceRequiredIDs) (resp *http.Response, err error) } // 
VirtualMachineScaleSetVMsClient defines needed functions for azure compute.VirtualMachineScaleSetVMsClient @@ -849,30 +848,6 @@ func (az *azVirtualMachineScaleSetsClient) List(ctx context.Context, resourceGro return result, nil } -func (az *azVirtualMachineScaleSetsClient) UpdateInstances(ctx context.Context, resourceGroupName string, VMScaleSetName string, VMInstanceIDs compute.VirtualMachineScaleSetVMInstanceRequiredIDs) (resp *http.Response, err error) { - /* Write rate limiting */ - if !az.rateLimiterWriter.TryAccept() { - err = createRateLimitErr(true, "VMSSUpdateInstances") - return - } - - glog.V(10).Infof("azVirtualMachineScaleSetsClient.UpdateInstances(%q,%q,%v): start", resourceGroupName, VMScaleSetName, VMInstanceIDs) - defer func() { - glog.V(10).Infof("azVirtualMachineScaleSetsClient.UpdateInstances(%q,%q,%v): end", resourceGroupName, VMScaleSetName, VMInstanceIDs) - }() - - mc := newMetricContext("vmss", "update_instances", resourceGroupName, az.client.SubscriptionID) - future, err := az.client.UpdateInstances(ctx, resourceGroupName, VMScaleSetName, VMInstanceIDs) - mc.Observe(err) - if err != nil { - return future.Response(), err - } - - err = future.WaitForCompletionRef(ctx, az.client.Client) - mc.Observe(err) - return future.Response(), err -} - // azVirtualMachineScaleSetVMsClient implements VirtualMachineScaleSetVMsClient. 
type azVirtualMachineScaleSetVMsClient struct { client compute.VirtualMachineScaleSetVMsClient diff --git a/pkg/cloudprovider/providers/azure/azure_vmss.go b/pkg/cloudprovider/providers/azure/azure_vmss.go index 8091e057db9a0..f24ef77a88013 100644 --- a/pkg/cloudprovider/providers/azure/azure_vmss.go +++ b/pkg/cloudprovider/providers/azure/azure_vmss.go @@ -32,7 +32,6 @@ import ( "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" utilerrors "k8s.io/apimachinery/pkg/util/errors" - "k8s.io/apimachinery/pkg/util/wait" "k8s.io/kubernetes/pkg/cloudprovider" ) @@ -574,31 +573,6 @@ func (ss *scaleSet) GetPrimaryInterface(nodeName string) (network.Interface, err return nic, nil } -// getScaleSetWithRetry gets scale set with exponential backoff retry -func (ss *scaleSet) getScaleSetWithRetry(service *v1.Service, name string) (compute.VirtualMachineScaleSet, bool, error) { - var result compute.VirtualMachineScaleSet - var exists bool - - err := wait.ExponentialBackoff(ss.requestBackoff(), func() (bool, error) { - cached, retryErr := ss.vmssCache.Get(name) - if retryErr != nil { - ss.Event(service, v1.EventTypeWarning, "GetVirtualMachineScaleSet", retryErr.Error()) - glog.Errorf("backoff: failure for scale set %q, will retry,err=%v", name, retryErr) - return false, nil - } - glog.V(4).Infof("backoff: success for scale set %q", name) - - if cached != nil { - exists = true - result = *(cached.(*compute.VirtualMachineScaleSet)) - } - - return true, nil - }) - - return result, exists, err -} - // getPrimarynetworkInterfaceConfiguration gets primary network interface configuration for scale set virtual machine. 
func (ss *scaleSet) getPrimarynetworkInterfaceConfiguration(networkConfigurations []compute.VirtualMachineScaleSetNetworkConfiguration, nodeName string) (*compute.VirtualMachineScaleSetNetworkConfiguration, error) { if len(networkConfigurations) == 1 { @@ -631,21 +605,10 @@ func (ss *scaleSet) getPrimaryIPConfigForScaleSet(config *compute.VirtualMachine return nil, fmt.Errorf("failed to find a primary IP configuration for the scale set VM %q", nodeName) } -// updateVMSSInstancesWithRetry invokes ss.VirtualMachineScaleSetsClient.UpdateInstances with exponential backoff retry. -func (ss *scaleSet) updateVMSSInstancesWithRetry(service *v1.Service, scaleSetName string, vmInstanceIDs compute.VirtualMachineScaleSetVMInstanceRequiredIDs) error { - return wait.ExponentialBackoff(ss.requestBackoff(), func() (bool, error) { - ctx, cancel := getContextWithCancel() - defer cancel() - resp, err := ss.VirtualMachineScaleSetsClient.UpdateInstances(ctx, ss.ResourceGroup, scaleSetName, vmInstanceIDs) - glog.V(10).Infof("VirtualMachineScaleSetsClient.UpdateInstances(%s): end", scaleSetName) - return ss.processHTTPRetryResponse(service, "CreateOrUpdateVMSSInstance", resp, err) - }) -} - // EnsureHostInPool ensures the given VM's Primary NIC's Primary IP Configuration is // participating in the specified LoadBalancer Backend Pool. 
func (ss *scaleSet) EnsureHostInPool(service *v1.Service, nodeName types.NodeName, backendPoolID string, vmSetName string, isInternal bool) error { - klog.V(3).Infof("ensuring node %q of scaleset %q in LB backendpool %q", nodeName, vmSetName, backendPoolID) + glog.V(3).Infof("ensuring node %q of scaleset %q in LB backendpool %q", nodeName, vmSetName, backendPoolID) vmName := mapNodeNameToVMName(nodeName) ssName, instanceID, vm, err := ss.getVmssVM(vmName) if err != nil { @@ -658,7 +621,7 @@ func (ss *scaleSet) EnsureHostInPool(service *v1.Service, nodeName types.NodeNam // - For standard SKU load balancer, backend could belong to multiple VMSS, so we // don't check vmSet for it. if vmSetName != "" && !ss.useStandardLoadBalancer() && !strings.EqualFold(vmSetName, ssName) { - klog.V(3).Infof("EnsureHostInPool skips node %s because it is not in the scaleSet %s", vmName, vmSetName) + glog.V(3).Infof("EnsureHostInPool skips node %s because it is not in the scaleSet %s", vmName, vmSetName) return nil } @@ -709,7 +672,7 @@ func (ss *scaleSet) EnsureHostInPool(service *v1.Service, nodeName types.NodeNam return err } if !isSameLB { - klog.V(4).Infof("Node %q has already been added to LB %q, omit adding it to a new one", nodeName, oldLBName) + glog.V(4).Infof("Node %q has already been added to LB %q, omit adding it to a new one", nodeName, oldLBName) return nil } } @@ -744,14 +707,14 @@ func (ss *scaleSet) EnsureHostInPool(service *v1.Service, nodeName types.NodeNam // Update vmssVM with backoff. 
ctx, cancel := getContextWithCancel() defer cancel() - klog.V(2).Infof("EnsureHostInPool begins to update vmssVM(%s) with new backendPoolID %s", vmName, backendPoolID) + glog.V(2).Infof("EnsureHostInPool begins to update vmssVM(%s) with new backendPoolID %s", vmName, backendPoolID) resp, err := ss.VirtualMachineScaleSetVMsClient.Update(ctx, nodeResourceGroup, ssName, instanceID, newVM) if ss.CloudProviderBackoff && shouldRetryHTTPRequest(resp, err) { - klog.V(2).Infof("EnsureHostInPool update backing off vmssVM(%s) with new backendPoolID %s, err: %v", vmName, backendPoolID, err) - retryErr := ss.UpdateVmssVMWithRetry(nodeResourceGroup, ssName, instanceID, newVM) + glog.V(2).Infof("EnsureHostInPool update backing off vmssVM(%s) with new backendPoolID %s, err: %v", vmName, backendPoolID, err) + retryErr := ss.UpdateVmssVMWithRetry(ctx, nodeResourceGroup, ssName, instanceID, newVM) if retryErr != nil { err = retryErr - klog.Errorf("EnsureHostInPool update abort backoff vmssVM(%s) with new backendPoolID %s, err: %v", vmName, backendPoolID, err) + glog.Errorf("EnsureHostInPool update abort backoff vmssVM(%s) with new backendPoolID %s, err: %v", vmName, backendPoolID, err) } } @@ -766,12 +729,12 @@ func (ss *scaleSet) EnsureHostsInPool(service *v1.Service, nodes []*v1.Node, bac localNodeName := node.Name if ss.useStandardLoadBalancer() && ss.excludeMasterNodesFromStandardLB() && isMasterNode(node) { - klog.V(4).Infof("Excluding master node %q from load balancer backendpool %q", localNodeName, backendPoolID) + glog.V(4).Infof("Excluding master node %q from load balancer backendpool %q", localNodeName, backendPoolID) continue } if ss.ShouldNodeExcludedFromLoadBalancer(node) { - klog.V(4).Infof("Excluding unmanaged/external-resource-group node %q", localNodeName) + glog.V(4).Infof("Excluding unmanaged/external-resource-group node %q", localNodeName) continue } @@ -781,7 +744,7 @@ func (ss *scaleSet) EnsureHostsInPool(service *v1.Service, nodes []*v1.Node, bac // Check 
whether the node is VMAS virtual machine. managedByAS, err := ss.isNodeManagedByAvailabilitySet(localNodeName) if err != nil { - klog.Errorf("Failed to check isNodeManagedByAvailabilitySet(%s): %v", localNodeName, err) + glog.Errorf("Failed to check isNodeManagedByAvailabilitySet(%s): %v", localNodeName, err) return err } if managedByAS { @@ -836,7 +799,7 @@ func (ss *scaleSet) ensureBackendPoolDeletedFromNode(service *v1.Service, nodeNa for i := len(existingBackendPools) - 1; i >= 0; i-- { curPool := existingBackendPools[i] if strings.EqualFold(backendPoolID, *curPool.ID) { - klog.V(10).Infof("ensureBackendPoolDeletedFromNode gets unwanted backend pool %q for node %s", backendPoolID, nodeName) + glog.V(10).Infof("ensureBackendPoolDeletedFromNode gets unwanted backend pool %q for node %s", backendPoolID, nodeName) foundPool = true newBackendPools = append(existingBackendPools[:i], existingBackendPools[i+1:]...) } @@ -873,20 +836,20 @@ func (ss *scaleSet) ensureBackendPoolDeletedFromNode(service *v1.Service, nodeNa // Update vmssVM with backoff. 
ctx, cancel := getContextWithCancel() defer cancel() - klog.V(2).Infof("ensureBackendPoolDeletedFromNode begins to update vmssVM(%s) with backendPoolID %s", nodeName, backendPoolID) + glog.V(2).Infof("ensureBackendPoolDeletedFromNode begins to update vmssVM(%s) with backendPoolID %s", nodeName, backendPoolID) resp, err := ss.VirtualMachineScaleSetVMsClient.Update(ctx, nodeResourceGroup, ssName, instanceID, newVM) if ss.CloudProviderBackoff && shouldRetryHTTPRequest(resp, err) { - klog.V(2).Infof("ensureBackendPoolDeletedFromNode update backing off vmssVM(%s) with backendPoolID %s, err: %v", nodeName, backendPoolID, err) - retryErr := ss.UpdateVmssVMWithRetry(nodeResourceGroup, ssName, instanceID, newVM) + glog.V(2).Infof("ensureBackendPoolDeletedFromNode update backing off vmssVM(%s) with backendPoolID %s, err: %v", nodeName, backendPoolID, err) + retryErr := ss.UpdateVmssVMWithRetry(ctx, nodeResourceGroup, ssName, instanceID, newVM) if retryErr != nil { err = retryErr - klog.Errorf("ensureBackendPoolDeletedFromNode update abort backoff vmssVM(%s) with backendPoolID %s, err: %v", nodeName, backendPoolID, err) + glog.Errorf("ensureBackendPoolDeletedFromNode update abort backoff vmssVM(%s) with backendPoolID %s, err: %v", nodeName, backendPoolID, err) } } if err != nil { - klog.Errorf("ensureBackendPoolDeletedFromNode failed to update vmssVM(%s) with backendPoolID %s: %v", nodeName, backendPoolID, err) + glog.Errorf("ensureBackendPoolDeletedFromNode failed to update vmssVM(%s) with backendPoolID %s: %v", nodeName, backendPoolID, err) } else { - klog.V(2).Infof("ensureBackendPoolDeletedFromNode update vmssVM(%s) with backendPoolID %s succeeded", nodeName, backendPoolID) + glog.V(2).Infof("ensureBackendPoolDeletedFromNode update vmssVM(%s) with backendPoolID %s succeeded", nodeName, backendPoolID) } return err } @@ -895,7 +858,7 @@ func (ss *scaleSet) ensureBackendPoolDeletedFromNode(service *v1.Service, nodeNa func (ss *scaleSet) 
getNodeNameByIPConfigurationID(ipConfigurationID string) (string, error) { matches := vmssIPConfigurationRE.FindStringSubmatch(ipConfigurationID) if len(matches) != 4 { - klog.V(4).Infof("Can not extract scale set name from ipConfigurationID (%s), assuming it is mananaged by availability set", ipConfigurationID) + glog.V(4).Infof("Can not extract scale set name from ipConfigurationID (%s), assuming it is mananaged by availability set", ipConfigurationID) return "", ErrorNotVmssInstance } @@ -951,7 +914,7 @@ func (ss *scaleSet) EnsureBackendPoolDeleted(service *v1.Service, backendPoolID, if err == ErrorNotVmssInstance { // Do nothing for the VMAS nodes. return nil } - klog.Errorf("Failed to getNodeNameByIPConfigurationID(%s): %v", ipConfigurationID, err) + glog.Errorf("Failed to getNodeNameByIPConfigurationID(%s): %v", ipConfigurationID, err) return err } From 894d783dab9b07d1ca5a9fa89cb676ea2f355bf7 Mon Sep 17 00:00:00 2001 From: Pengfei Ni Date: Tue, 16 Apr 2019 17:19:04 +0800 Subject: [PATCH 74/96] Add unit tests --- pkg/cloudprovider/providers/azure/BUILD | 1 - .../providers/azure/azure_vmss_test.go | 67 +++++++++++++++++++ 2 files changed, 67 insertions(+), 1 deletion(-) diff --git a/pkg/cloudprovider/providers/azure/BUILD b/pkg/cloudprovider/providers/azure/BUILD index c183c84996cb6..d0da37b0309f2 100644 --- a/pkg/cloudprovider/providers/azure/BUILD +++ b/pkg/cloudprovider/providers/azure/BUILD @@ -79,7 +79,6 @@ go_test( srcs = [ "azure_backoff_test.go", "azure_cache_test.go", - "azure_controller_common_test.go", "azure_instances_test.go", "azure_loadbalancer_test.go", "azure_metrics_test.go", diff --git a/pkg/cloudprovider/providers/azure/azure_vmss_test.go b/pkg/cloudprovider/providers/azure/azure_vmss_test.go index e963a854dc8fb..b1d818c56fdbc 100644 --- a/pkg/cloudprovider/providers/azure/azure_vmss_test.go +++ b/pkg/cloudprovider/providers/azure/azure_vmss_test.go @@ -80,6 +80,21 @@ func setTestVirtualMachineCloud(ss *Cloud, scaleSetName, zone string, 
faultDomai ID: &interfaceID, }, } + ipConfigurations := []compute.VirtualMachineScaleSetIPConfiguration{ + { + Name: to.StringPtr("ipconfig1"), + VirtualMachineScaleSetIPConfigurationProperties: &compute.VirtualMachineScaleSetIPConfigurationProperties{}, + }, + } + networkConfigurations := []compute.VirtualMachineScaleSetNetworkConfiguration{ + { + Name: to.StringPtr("ipconfig1"), + ID: to.StringPtr("fakeNetworkConfiguration"), + VirtualMachineScaleSetNetworkConfigurationProperties: &compute.VirtualMachineScaleSetNetworkConfigurationProperties{ + IPConfigurations: &ipConfigurations, + }, + }, + } vmssVM := compute.VirtualMachineScaleSetVM{ VirtualMachineScaleSetVMProperties: &compute.VirtualMachineScaleSetVMProperties{ OsProfile: &compute.OSProfile{ @@ -88,6 +103,9 @@ func setTestVirtualMachineCloud(ss *Cloud, scaleSetName, zone string, faultDomai NetworkProfile: &compute.NetworkProfile{ NetworkInterfaces: &networkInterfaces, }, + NetworkProfileConfiguration: &compute.VirtualMachineScaleSetVMNetworkProfileConfiguration{ + NetworkInterfaceConfigurations: &networkConfigurations, + }, InstanceView: &compute.VirtualMachineScaleSetVMInstanceView{ PlatformFaultDomain: &faultDomain, }, @@ -307,3 +325,52 @@ func TestGetIPByNodeName(t *testing.T) { assert.Equal(t, test.expected, []string{privateIP, publicIP}, test.description) } } + +func TestGetNodeNameByIPConfigurationID(t *testing.T) { + ipConfigurationIDTemplate := "/subscriptions/script/resourceGroups/rg/providers/Microsoft.Compute/virtualMachineScaleSets/%s/virtualMachines/%s/networkInterfaces/%s/ipConfigurations/ipconfig1" + + testCases := []struct { + description string + scaleSet string + vmList []string + ipConfigurationID string + expected string + expectError bool + }{ + { + description: "getNodeNameByIPConfigurationID should get node's Name when the node is existing", + scaleSet: "scaleset1", + ipConfigurationID: fmt.Sprintf(ipConfigurationIDTemplate, "scaleset1", "0", "scaleset1"), + vmList: 
[]string{"vmssee6c2000000", "vmssee6c2000001"}, + expected: "vmssee6c2000000", + }, + { + description: "getNodeNameByIPConfigurationID should return error for non-exist nodes", + scaleSet: "scaleset2", + ipConfigurationID: fmt.Sprintf(ipConfigurationIDTemplate, "scaleset2", "3", "scaleset1"), + vmList: []string{"vmssee6c2000002", "vmssee6c2000003"}, + expectError: true, + }, + { + description: "getNodeNameByIPConfigurationID should return error for wrong ipConfigurationID", + scaleSet: "scaleset3", + ipConfigurationID: "invalid-configuration-id", + vmList: []string{"vmssee6c2000004", "vmssee6c2000005"}, + expectError: true, + }, + } + + for _, test := range testCases { + ss, err := newTestScaleSet(test.scaleSet, "", 0, test.vmList) + assert.NoError(t, err, test.description) + + nodeName, err := ss.getNodeNameByIPConfigurationID(test.ipConfigurationID) + if test.expectError { + assert.Error(t, err, test.description) + continue + } + + assert.NoError(t, err, test.description) + assert.Equal(t, test.expected, nodeName, test.description) + } +} From e60f9faa181cafb4845394a2a4918b8ed21a4f71 Mon Sep 17 00:00:00 2001 From: Pengfei Ni Date: Wed, 24 Apr 2019 05:45:54 +0000 Subject: [PATCH 75/96] Update vendors --- Godeps/Godeps.json | 87 +- Godeps/LICENSES | 2 +- vendor/BUILD | 2 +- .../{2018-04-01 => 2019-03-01}/compute/BUILD | 9 +- .../compute/availabilitysets.go | 14 +- .../compute/client.go | 2 +- .../2019-03-01/compute/containerservices.go | 475 +++ .../compute/disks.go | 28 +- .../mgmt/2019-03-01/compute/galleries.go | 435 ++ .../mgmt/2019-03-01/compute/galleryimages.go | 367 ++ .../compute/galleryimageversions.go | 375 ++ .../compute/images.go | 12 +- .../compute/loganalytics.go | 4 +- .../compute/models.go | 3687 +++++++++++++---- .../compute/operations.go | 2 +- .../mgmt/2019-03-01/compute/resourceskus.go | 130 + .../compute/snapshots.go | 36 +- .../compute/usage.go | 2 +- .../compute/version.go | 2 +- .../compute/virtualmachineextensionimages.go | 6 +- 
.../compute/virtualmachineextensions.go | 10 +- .../compute/virtualmachineimages.go | 10 +- .../compute/virtualmachineruncommands.go | 4 +- .../compute/virtualmachines.go | 225 +- .../virtualmachinescalesetextensions.go | 8 +- .../virtualmachinescalesetrollingupgrades.go | 78 +- .../compute/virtualmachinescalesets.go | 133 +- .../compute/virtualmachinescalesetvms.go | 54 +- .../compute/virtualmachinesizes.go | 5 +- .../Azure/azure-sdk-for-go/version/version.go | 2 +- 30 files changed, 5217 insertions(+), 989 deletions(-) rename vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/{2018-04-01 => 2019-03-01}/compute/BUILD (88%) rename vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/{2018-04-01 => 2019-03-01}/compute/availabilitysets.go (99%) rename vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/{2018-04-01 => 2019-03-01}/compute/client.go (98%) create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-03-01/compute/containerservices.go rename vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/{2018-04-01 => 2019-03-01}/compute/disks.go (95%) create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-03-01/compute/galleries.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-03-01/compute/galleryimages.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-03-01/compute/galleryimageversions.go rename vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/{2018-04-01 => 2019-03-01}/compute/images.go (99%) rename vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/{2018-04-01 => 2019-03-01}/compute/loganalytics.go (99%) rename vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/{2018-04-01 => 2019-03-01}/compute/models.go (69%) rename vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/{2018-04-01 => 2019-03-01}/compute/operations.go (99%) create 
mode 100644 vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-03-01/compute/resourceskus.go rename vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/{2018-04-01 => 2019-03-01}/compute/snapshots.go (93%) rename vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/{2018-04-01 => 2019-03-01}/compute/usage.go (99%) rename vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/{2018-04-01 => 2019-03-01}/compute/version.go (94%) rename vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/{2018-04-01 => 2019-03-01}/compute/virtualmachineextensionimages.go (99%) rename vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/{2018-04-01 => 2019-03-01}/compute/virtualmachineextensions.go (99%) rename vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/{2018-04-01 => 2019-03-01}/compute/virtualmachineimages.go (99%) rename vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/{2018-04-01 => 2019-03-01}/compute/virtualmachineruncommands.go (99%) rename vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/{2018-04-01 => 2019-03-01}/compute/virtualmachines.go (86%) rename vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/{2018-04-01 => 2019-03-01}/compute/virtualmachinescalesetextensions.go (99%) rename vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/{2018-04-01 => 2019-03-01}/compute/virtualmachinescalesetrollingupgrades.go (75%) rename vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/{2018-04-01 => 2019-03-01}/compute/virtualmachinescalesets.go (93%) rename vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/{2018-04-01 => 2019-03-01}/compute/virtualmachinescalesetvms.go (96%) rename vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/{2018-04-01 => 2019-03-01}/compute/virtualmachinesizes.go (96%) diff --git a/Godeps/Godeps.json b/Godeps/Godeps.json index 1716fe823dfc3..ac4dcbba1ad64 100644 --- a/Godeps/Godeps.json +++ 
b/Godeps/Godeps.json @@ -36,34 +36,34 @@ "Rev": "3b1ae45394a234c385be014e9a488f2bb6eef821" }, { - "ImportPath": "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-04-01/compute", - "Comment": "v19.0.0", - "Rev": "520918e6c8e8e1064154f51d13e02fad92b287b8" + "ImportPath": "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-03-01/compute", + "Comment": "v19.2.0", + "Rev": "6b60506b6dd8ad5d949df27d20e67fe437aa1e47" }, { "ImportPath": "github.com/Azure/azure-sdk-for-go/services/containerregistry/mgmt/2017-10-01/containerregistry", - "Comment": "v19.0.0", - "Rev": "520918e6c8e8e1064154f51d13e02fad92b287b8" + "Comment": "v19.2.0", + "Rev": "6b60506b6dd8ad5d949df27d20e67fe437aa1e47" }, { "ImportPath": "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2017-09-01/network", - "Comment": "v19.0.0", - "Rev": "520918e6c8e8e1064154f51d13e02fad92b287b8" + "Comment": "v19.2.0", + "Rev": "6b60506b6dd8ad5d949df27d20e67fe437aa1e47" }, { "ImportPath": "github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2017-10-01/storage", - "Comment": "v19.0.0", - "Rev": "520918e6c8e8e1064154f51d13e02fad92b287b8" + "Comment": "v19.2.0", + "Rev": "6b60506b6dd8ad5d949df27d20e67fe437aa1e47" }, { "ImportPath": "github.com/Azure/azure-sdk-for-go/storage", - "Comment": "v19.0.0", - "Rev": "520918e6c8e8e1064154f51d13e02fad92b287b8" + "Comment": "v19.2.0", + "Rev": "6b60506b6dd8ad5d949df27d20e67fe437aa1e47" }, { "ImportPath": "github.com/Azure/azure-sdk-for-go/version", - "Comment": "v19.0.0", - "Rev": "520918e6c8e8e1064154f51d13e02fad92b287b8" + "Comment": "v19.2.0", + "Rev": "6b60506b6dd8ad5d949df27d20e67fe437aa1e47" }, { "ImportPath": "github.com/Azure/go-ansiterm", @@ -1351,132 +1351,132 @@ }, { "ImportPath": "github.com/docker/docker/api", - "Comment": "docs-v1.12.0-rc4-2016-07-15-9510-ga9fbbdc8d", + "Comment": "docs-v1.12.0-rc4-2016-07-15-9510-ga9fbbdc8dd", "Rev": "a9fbbdc8dd8794b20af358382ab780559bca589d" }, { "ImportPath": "github.com/docker/docker/api/types", - 
"Comment": "docs-v1.12.0-rc4-2016-07-15-9510-ga9fbbdc8d", + "Comment": "docs-v1.12.0-rc4-2016-07-15-9510-ga9fbbdc8dd", "Rev": "a9fbbdc8dd8794b20af358382ab780559bca589d" }, { "ImportPath": "github.com/docker/docker/api/types/blkiodev", - "Comment": "docs-v1.12.0-rc4-2016-07-15-9510-ga9fbbdc8d", + "Comment": "docs-v1.12.0-rc4-2016-07-15-9510-ga9fbbdc8dd", "Rev": "a9fbbdc8dd8794b20af358382ab780559bca589d" }, { "ImportPath": "github.com/docker/docker/api/types/container", - "Comment": "docs-v1.12.0-rc4-2016-07-15-9510-ga9fbbdc8d", + "Comment": "docs-v1.12.0-rc4-2016-07-15-9510-ga9fbbdc8dd", "Rev": "a9fbbdc8dd8794b20af358382ab780559bca589d" }, { "ImportPath": "github.com/docker/docker/api/types/events", - "Comment": "docs-v1.12.0-rc4-2016-07-15-9510-ga9fbbdc8d", + "Comment": "docs-v1.12.0-rc4-2016-07-15-9510-ga9fbbdc8dd", "Rev": "a9fbbdc8dd8794b20af358382ab780559bca589d" }, { "ImportPath": "github.com/docker/docker/api/types/filters", - "Comment": "docs-v1.12.0-rc4-2016-07-15-9510-ga9fbbdc8d", + "Comment": "docs-v1.12.0-rc4-2016-07-15-9510-ga9fbbdc8dd", "Rev": "a9fbbdc8dd8794b20af358382ab780559bca589d" }, { "ImportPath": "github.com/docker/docker/api/types/image", - "Comment": "docs-v1.12.0-rc4-2016-07-15-9510-ga9fbbdc8d", + "Comment": "docs-v1.12.0-rc4-2016-07-15-9510-ga9fbbdc8dd", "Rev": "a9fbbdc8dd8794b20af358382ab780559bca589d" }, { "ImportPath": "github.com/docker/docker/api/types/mount", - "Comment": "docs-v1.12.0-rc4-2016-07-15-9510-ga9fbbdc8d", + "Comment": "docs-v1.12.0-rc4-2016-07-15-9510-ga9fbbdc8dd", "Rev": "a9fbbdc8dd8794b20af358382ab780559bca589d" }, { "ImportPath": "github.com/docker/docker/api/types/network", - "Comment": "docs-v1.12.0-rc4-2016-07-15-9510-ga9fbbdc8d", + "Comment": "docs-v1.12.0-rc4-2016-07-15-9510-ga9fbbdc8dd", "Rev": "a9fbbdc8dd8794b20af358382ab780559bca589d" }, { "ImportPath": "github.com/docker/docker/api/types/registry", - "Comment": "docs-v1.12.0-rc4-2016-07-15-9510-ga9fbbdc8d", + "Comment": 
"docs-v1.12.0-rc4-2016-07-15-9510-ga9fbbdc8dd", "Rev": "a9fbbdc8dd8794b20af358382ab780559bca589d" }, { "ImportPath": "github.com/docker/docker/api/types/strslice", - "Comment": "docs-v1.12.0-rc4-2016-07-15-9510-ga9fbbdc8d", + "Comment": "docs-v1.12.0-rc4-2016-07-15-9510-ga9fbbdc8dd", "Rev": "a9fbbdc8dd8794b20af358382ab780559bca589d" }, { "ImportPath": "github.com/docker/docker/api/types/swarm", - "Comment": "docs-v1.12.0-rc4-2016-07-15-9510-ga9fbbdc8d", + "Comment": "docs-v1.12.0-rc4-2016-07-15-9510-ga9fbbdc8dd", "Rev": "a9fbbdc8dd8794b20af358382ab780559bca589d" }, { "ImportPath": "github.com/docker/docker/api/types/swarm/runtime", - "Comment": "docs-v1.12.0-rc4-2016-07-15-9510-ga9fbbdc8d", + "Comment": "docs-v1.12.0-rc4-2016-07-15-9510-ga9fbbdc8dd", "Rev": "a9fbbdc8dd8794b20af358382ab780559bca589d" }, { "ImportPath": "github.com/docker/docker/api/types/time", - "Comment": "docs-v1.12.0-rc4-2016-07-15-9510-ga9fbbdc8d", + "Comment": "docs-v1.12.0-rc4-2016-07-15-9510-ga9fbbdc8dd", "Rev": "a9fbbdc8dd8794b20af358382ab780559bca589d" }, { "ImportPath": "github.com/docker/docker/api/types/versions", - "Comment": "docs-v1.12.0-rc4-2016-07-15-9510-ga9fbbdc8d", + "Comment": "docs-v1.12.0-rc4-2016-07-15-9510-ga9fbbdc8dd", "Rev": "a9fbbdc8dd8794b20af358382ab780559bca589d" }, { "ImportPath": "github.com/docker/docker/api/types/volume", - "Comment": "docs-v1.12.0-rc4-2016-07-15-9510-ga9fbbdc8d", + "Comment": "docs-v1.12.0-rc4-2016-07-15-9510-ga9fbbdc8dd", "Rev": "a9fbbdc8dd8794b20af358382ab780559bca589d" }, { "ImportPath": "github.com/docker/docker/client", - "Comment": "docs-v1.12.0-rc4-2016-07-15-9510-ga9fbbdc8d", + "Comment": "docs-v1.12.0-rc4-2016-07-15-9510-ga9fbbdc8dd", "Rev": "a9fbbdc8dd8794b20af358382ab780559bca589d" }, { "ImportPath": "github.com/docker/docker/daemon/logger/jsonfilelog/jsonlog", - "Comment": "docs-v1.12.0-rc4-2016-07-15-9510-ga9fbbdc8d", + "Comment": "docs-v1.12.0-rc4-2016-07-15-9510-ga9fbbdc8dd", "Rev": "a9fbbdc8dd8794b20af358382ab780559bca589d" }, { 
"ImportPath": "github.com/docker/docker/pkg/jsonmessage", - "Comment": "docs-v1.12.0-rc4-2016-07-15-9510-ga9fbbdc8d", + "Comment": "docs-v1.12.0-rc4-2016-07-15-9510-ga9fbbdc8dd", "Rev": "a9fbbdc8dd8794b20af358382ab780559bca589d" }, { "ImportPath": "github.com/docker/docker/pkg/mount", - "Comment": "docs-v1.12.0-rc4-2016-07-15-9510-ga9fbbdc8d", + "Comment": "docs-v1.12.0-rc4-2016-07-15-9510-ga9fbbdc8dd", "Rev": "a9fbbdc8dd8794b20af358382ab780559bca589d" }, { "ImportPath": "github.com/docker/docker/pkg/parsers", - "Comment": "docs-v1.12.0-rc4-2016-07-15-9510-ga9fbbdc8d", + "Comment": "docs-v1.12.0-rc4-2016-07-15-9510-ga9fbbdc8dd", "Rev": "a9fbbdc8dd8794b20af358382ab780559bca589d" }, { "ImportPath": "github.com/docker/docker/pkg/parsers/operatingsystem", - "Comment": "docs-v1.12.0-rc4-2016-07-15-9510-ga9fbbdc8d", + "Comment": "docs-v1.12.0-rc4-2016-07-15-9510-ga9fbbdc8dd", "Rev": "a9fbbdc8dd8794b20af358382ab780559bca589d" }, { "ImportPath": "github.com/docker/docker/pkg/stdcopy", - "Comment": "docs-v1.12.0-rc4-2016-07-15-9510-ga9fbbdc8d", + "Comment": "docs-v1.12.0-rc4-2016-07-15-9510-ga9fbbdc8dd", "Rev": "a9fbbdc8dd8794b20af358382ab780559bca589d" }, { "ImportPath": "github.com/docker/docker/pkg/sysinfo", - "Comment": "docs-v1.12.0-rc4-2016-07-15-9510-ga9fbbdc8d", + "Comment": "docs-v1.12.0-rc4-2016-07-15-9510-ga9fbbdc8dd", "Rev": "a9fbbdc8dd8794b20af358382ab780559bca589d" }, { "ImportPath": "github.com/docker/docker/pkg/term", - "Comment": "docs-v1.12.0-rc4-2016-07-15-9510-ga9fbbdc8d", + "Comment": "docs-v1.12.0-rc4-2016-07-15-9510-ga9fbbdc8dd", "Rev": "a9fbbdc8dd8794b20af358382ab780559bca589d" }, { "ImportPath": "github.com/docker/docker/pkg/term/windows", - "Comment": "docs-v1.12.0-rc4-2016-07-15-9510-ga9fbbdc8d", + "Comment": "docs-v1.12.0-rc4-2016-07-15-9510-ga9fbbdc8dd", "Rev": "a9fbbdc8dd8794b20af358382ab780559bca589d" }, { @@ -1501,6 +1501,7 @@ }, { "ImportPath": "github.com/docker/libnetwork/ipvs", + "Comment": "v0.8.0-dev.2-1265-ga9cd636e", "Rev": 
"a9cd636e37898226332c439363e2ed0ea185ae92" }, { @@ -1542,7 +1543,7 @@ }, { "ImportPath": "github.com/evanphx/json-patch", - "Comment": "v4.1.0-19-g5858425f75500d", + "Comment": "v4.1.0-19-g5858425", "Rev": "5858425f75500d40c52783dce87d085a483ce135" }, { @@ -2252,17 +2253,17 @@ }, { "ImportPath": "github.com/heketi/heketi/client/api/go-client", - "Comment": "v4.0.0-95-gaaf4061", + "Comment": "v4.0.0-95-gaaf40619", "Rev": "aaf40619d85fda757e7a1c1ea1b5118cea65594b" }, { "ImportPath": "github.com/heketi/heketi/pkg/glusterfs/api", - "Comment": "v4.0.0-95-gaaf4061", + "Comment": "v4.0.0-95-gaaf40619", "Rev": "aaf40619d85fda757e7a1c1ea1b5118cea65594b" }, { "ImportPath": "github.com/heketi/heketi/pkg/utils", - "Comment": "v4.0.0-95-gaaf4061", + "Comment": "v4.0.0-95-gaaf40619", "Rev": "aaf40619d85fda757e7a1c1ea1b5118cea65594b" }, { diff --git a/Godeps/LICENSES b/Godeps/LICENSES index d5a75875afeaf..6f6cfcf33b76e 100644 --- a/Godeps/LICENSES +++ b/Godeps/LICENSES @@ -8137,7 +8137,7 @@ SOFTWARE. ================================================================================ -= vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-04-01/compute licensed under: = += vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-03-01/compute licensed under: = Apache License diff --git a/vendor/BUILD b/vendor/BUILD index 92ce77cb69399..9b72fc1bfae4b 100644 --- a/vendor/BUILD +++ b/vendor/BUILD @@ -15,7 +15,7 @@ filegroup( "//vendor/bitbucket.org/ww/goautoneg:all-srcs", "//vendor/cloud.google.com/go/compute/metadata:all-srcs", "//vendor/cloud.google.com/go/internal:all-srcs", - "//vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-04-01/compute:all-srcs", + "//vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-03-01/compute:all-srcs", "//vendor/github.com/Azure/azure-sdk-for-go/services/containerregistry/mgmt/2017-10-01/containerregistry:all-srcs", 
"//vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2017-09-01/network:all-srcs", "//vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2017-10-01/storage:all-srcs", diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-04-01/compute/BUILD b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-03-01/compute/BUILD similarity index 88% rename from vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-04-01/compute/BUILD rename to vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-03-01/compute/BUILD index 6a9f486e61f53..2f8fb9e7692e5 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-04-01/compute/BUILD +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-03-01/compute/BUILD @@ -5,11 +5,16 @@ go_library( srcs = [ "availabilitysets.go", "client.go", + "containerservices.go", "disks.go", + "galleries.go", + "galleryimages.go", + "galleryimageversions.go", "images.go", "loganalytics.go", "models.go", "operations.go", + "resourceskus.go", "snapshots.go", "usage.go", "version.go", @@ -24,8 +29,8 @@ go_library( "virtualmachinescalesetvms.go", "virtualmachinesizes.go", ], - importmap = "k8s.io/kubernetes/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-04-01/compute", - importpath = "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-04-01/compute", + importmap = "k8s.io/kubernetes/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-03-01/compute", + importpath = "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-03-01/compute", visibility = ["//visibility:public"], deps = [ "//vendor/github.com/Azure/azure-sdk-for-go/version:go_default_library", diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-04-01/compute/availabilitysets.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-03-01/compute/availabilitysets.go similarity 
index 99% rename from vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-04-01/compute/availabilitysets.go rename to vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-03-01/compute/availabilitysets.go index 095954766e76a..32a4301ee8640 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-04-01/compute/availabilitysets.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-03-01/compute/availabilitysets.go @@ -74,7 +74,7 @@ func (client AvailabilitySetsClient) CreateOrUpdatePreparer(ctx context.Context, "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-04-01" + const APIVersion = "2019-03-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -143,7 +143,7 @@ func (client AvailabilitySetsClient) DeletePreparer(ctx context.Context, resourc "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-04-01" + const APIVersion = "2019-03-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -209,7 +209,7 @@ func (client AvailabilitySetsClient) GetPreparer(ctx context.Context, resourceGr "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-04-01" + const APIVersion = "2019-03-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -275,7 +275,7 @@ func (client AvailabilitySetsClient) ListPreparer(ctx context.Context, resourceG "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-04-01" + const APIVersion = "2019-03-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -370,7 +370,7 @@ func (client AvailabilitySetsClient) ListAvailableSizesPreparer(ctx context.Cont "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-04-01" + const APIVersion = "2019-03-01" queryParameters := 
map[string]interface{}{ "api-version": APIVersion, } @@ -433,7 +433,7 @@ func (client AvailabilitySetsClient) ListBySubscriptionPreparer(ctx context.Cont "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-04-01" + const APIVersion = "2019-03-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -528,7 +528,7 @@ func (client AvailabilitySetsClient) UpdatePreparer(ctx context.Context, resourc "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-04-01" + const APIVersion = "2019-03-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-04-01/compute/client.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-03-01/compute/client.go similarity index 98% rename from vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-04-01/compute/client.go rename to vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-03-01/compute/client.go index 49391ea5dd026..b23c9ca74268d 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-04-01/compute/client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-03-01/compute/client.go @@ -1,4 +1,4 @@ -// Package compute implements the Azure ARM Compute service API version 2018-04-01. +// Package compute implements the Azure ARM Compute service API version . 
// // Compute Client package compute diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-03-01/compute/containerservices.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-03-01/compute/containerservices.go new file mode 100644 index 0000000000000..b16788bfa558c --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-03-01/compute/containerservices.go @@ -0,0 +1,475 @@ +package compute + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "context" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/autorest/validation" + "net/http" +) + +// ContainerServicesClient is the compute Client +type ContainerServicesClient struct { + BaseClient +} + +// NewContainerServicesClient creates an instance of the ContainerServicesClient client. +func NewContainerServicesClient(subscriptionID string) ContainerServicesClient { + return NewContainerServicesClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewContainerServicesClientWithBaseURI creates an instance of the ContainerServicesClient client. 
+func NewContainerServicesClientWithBaseURI(baseURI string, subscriptionID string) ContainerServicesClient { + return ContainerServicesClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// CreateOrUpdate creates or updates a container service with the specified configuration of orchestrator, masters, and +// agents. +// Parameters: +// resourceGroupName - the name of the resource group. +// containerServiceName - the name of the container service in the specified subscription and resource group. +// parameters - parameters supplied to the Create or Update a Container Service operation. +func (client ContainerServicesClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, containerServiceName string, parameters ContainerService) (result ContainerServicesCreateOrUpdateFuture, err error) { + if err := validation.Validate([]validation.Validation{ + {TargetValue: parameters, + Constraints: []validation.Constraint{{Target: "parameters.ContainerServiceProperties", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "parameters.ContainerServiceProperties.CustomProfile", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "parameters.ContainerServiceProperties.CustomProfile.Orchestrator", Name: validation.Null, Rule: true, Chain: nil}}}, + {Target: "parameters.ContainerServiceProperties.ServicePrincipalProfile", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "parameters.ContainerServiceProperties.ServicePrincipalProfile.ClientID", Name: validation.Null, Rule: true, Chain: nil}, + {Target: "parameters.ContainerServiceProperties.ServicePrincipalProfile.Secret", Name: validation.Null, Rule: true, Chain: nil}, + }}, + {Target: "parameters.ContainerServiceProperties.MasterProfile", Name: validation.Null, Rule: true, + Chain: []validation.Constraint{{Target: "parameters.ContainerServiceProperties.MasterProfile.DNSPrefix", Name: validation.Null, Rule: true, Chain: nil}}}, + {Target: 
"parameters.ContainerServiceProperties.AgentPoolProfiles", Name: validation.Null, Rule: true, Chain: nil}, + {Target: "parameters.ContainerServiceProperties.WindowsProfile", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "parameters.ContainerServiceProperties.WindowsProfile.AdminUsername", Name: validation.Null, Rule: true, + Chain: []validation.Constraint{{Target: "parameters.ContainerServiceProperties.WindowsProfile.AdminUsername", Name: validation.Pattern, Rule: `^[a-zA-Z0-9]+([._]?[a-zA-Z0-9]+)*$`, Chain: nil}}}, + {Target: "parameters.ContainerServiceProperties.WindowsProfile.AdminPassword", Name: validation.Null, Rule: true, Chain: nil}, + }}, + {Target: "parameters.ContainerServiceProperties.LinuxProfile", Name: validation.Null, Rule: true, + Chain: []validation.Constraint{{Target: "parameters.ContainerServiceProperties.LinuxProfile.AdminUsername", Name: validation.Null, Rule: true, + Chain: []validation.Constraint{{Target: "parameters.ContainerServiceProperties.LinuxProfile.AdminUsername", Name: validation.Pattern, Rule: `^[a-z][a-z0-9_-]*$`, Chain: nil}}}, + {Target: "parameters.ContainerServiceProperties.LinuxProfile.SSH", Name: validation.Null, Rule: true, + Chain: []validation.Constraint{{Target: "parameters.ContainerServiceProperties.LinuxProfile.SSH.PublicKeys", Name: validation.Null, Rule: true, Chain: nil}}}, + }}, + {Target: "parameters.ContainerServiceProperties.DiagnosticsProfile", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "parameters.ContainerServiceProperties.DiagnosticsProfile.VMDiagnostics", Name: validation.Null, Rule: true, + Chain: []validation.Constraint{{Target: "parameters.ContainerServiceProperties.DiagnosticsProfile.VMDiagnostics.Enabled", Name: validation.Null, Rule: true, Chain: nil}}}, + }}, + }}}}}); err != nil { + return result, validation.NewError("compute.ContainerServicesClient", "CreateOrUpdate", err.Error()) + } + + req, err := 
client.CreateOrUpdatePreparer(ctx, resourceGroupName, containerServiceName, parameters) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.ContainerServicesClient", "CreateOrUpdate", nil, "Failure preparing request") + return + } + + result, err = client.CreateOrUpdateSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.ContainerServicesClient", "CreateOrUpdate", result.Response(), "Failure sending request") + return + } + + return +} + +// CreateOrUpdatePreparer prepares the CreateOrUpdate request. +func (client ContainerServicesClient) CreateOrUpdatePreparer(ctx context.Context, resourceGroupName string, containerServiceName string, parameters ContainerService) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "containerServiceName": autorest.Encode("path", containerServiceName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2017-01-31" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/containerServices/{containerServiceName}", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the +// http.Response Body if it receives an error. 
+func (client ContainerServicesClient) CreateOrUpdateSender(req *http.Request) (future ContainerServicesCreateOrUpdateFuture, err error) { + var resp *http.Response + resp, err = autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) + if err != nil { + return + } + err = autorest.Respond(resp, azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated, http.StatusAccepted)) + if err != nil { + return + } + future.Future, err = azure.NewFutureFromResponse(resp) + return +} + +// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always +// closes the http.Response Body. +func (client ContainerServicesClient) CreateOrUpdateResponder(resp *http.Response) (result ContainerService, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated, http.StatusAccepted), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Delete deletes the specified container service in the specified subscription and resource group. The operation does +// not delete other resources created as part of creating a container service, including storage accounts, VMs, and +// availability sets. All the other resources created with the container service are part of the same resource group +// and can be deleted individually. +// Parameters: +// resourceGroupName - the name of the resource group. +// containerServiceName - the name of the container service in the specified subscription and resource group. 
+func (client ContainerServicesClient) Delete(ctx context.Context, resourceGroupName string, containerServiceName string) (result ContainerServicesDeleteFuture, err error) { + req, err := client.DeletePreparer(ctx, resourceGroupName, containerServiceName) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.ContainerServicesClient", "Delete", nil, "Failure preparing request") + return + } + + result, err = client.DeleteSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.ContainerServicesClient", "Delete", result.Response(), "Failure sending request") + return + } + + return +} + +// DeletePreparer prepares the Delete request. +func (client ContainerServicesClient) DeletePreparer(ctx context.Context, resourceGroupName string, containerServiceName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "containerServiceName": autorest.Encode("path", containerServiceName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2017-01-31" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/containerServices/{containerServiceName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// DeleteSender sends the Delete request. The method will close the +// http.Response Body if it receives an error. 
+func (client ContainerServicesClient) DeleteSender(req *http.Request) (future ContainerServicesDeleteFuture, err error) { + var resp *http.Response + resp, err = autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) + if err != nil { + return + } + err = autorest.Respond(resp, azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent)) + if err != nil { + return + } + future.Future, err = azure.NewFutureFromResponse(resp) + return +} + +// DeleteResponder handles the response to the Delete request. The method always +// closes the http.Response Body. +func (client ContainerServicesClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent), + autorest.ByClosing()) + result.Response = resp + return +} + +// Get gets the properties of the specified container service in the specified subscription and resource group. The +// operation returns the properties including state, orchestrator, number of masters and agents, and FQDNs of masters +// and agents. +// Parameters: +// resourceGroupName - the name of the resource group. +// containerServiceName - the name of the container service in the specified subscription and resource group. 
+func (client ContainerServicesClient) Get(ctx context.Context, resourceGroupName string, containerServiceName string) (result ContainerService, err error) { + req, err := client.GetPreparer(ctx, resourceGroupName, containerServiceName) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.ContainerServicesClient", "Get", nil, "Failure preparing request") + return + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "compute.ContainerServicesClient", "Get", resp, "Failure sending request") + return + } + + result, err = client.GetResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.ContainerServicesClient", "Get", resp, "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. +func (client ContainerServicesClient) GetPreparer(ctx context.Context, resourceGroupName string, containerServiceName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "containerServiceName": autorest.Encode("path", containerServiceName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2017-01-31" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/containerServices/{containerServiceName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. 
+func (client ContainerServicesClient) GetSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. +func (client ContainerServicesClient) GetResponder(resp *http.Response) (result ContainerService, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// List gets a list of container services in the specified subscription. The operation returns properties of each +// container service including state, orchestrator, number of masters and agents, and FQDNs of masters and agents. +func (client ContainerServicesClient) List(ctx context.Context) (result ContainerServiceListResultPage, err error) { + result.fn = client.listNextResults + req, err := client.ListPreparer(ctx) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.ContainerServicesClient", "List", nil, "Failure preparing request") + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.cslr.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "compute.ContainerServicesClient", "List", resp, "Failure sending request") + return + } + + result.cslr, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.ContainerServicesClient", "List", resp, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. 
+func (client ContainerServicesClient) ListPreparer(ctx context.Context) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2017-01-31" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.ContainerService/containerServices", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client ContainerServicesClient) ListSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. +func (client ContainerServicesClient) ListResponder(resp *http.Response) (result ContainerServiceListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// listNextResults retrieves the next set of results, if any. 
+func (client ContainerServicesClient) listNextResults(lastResults ContainerServiceListResult) (result ContainerServiceListResult, err error) { + req, err := lastResults.containerServiceListResultPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "compute.ContainerServicesClient", "listNextResults", nil, "Failure preparing next results request") + } + if req == nil { + return + } + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "compute.ContainerServicesClient", "listNextResults", resp, "Failure sending next results request") + } + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.ContainerServicesClient", "listNextResults", resp, "Failure responding to next results request") + } + return +} + +// ListComplete enumerates all values, automatically crossing page boundaries as required. +func (client ContainerServicesClient) ListComplete(ctx context.Context) (result ContainerServiceListResultIterator, err error) { + result.page, err = client.List(ctx) + return +} + +// ListByResourceGroup gets a list of container services in the specified subscription and resource group. The +// operation returns properties of each container service including state, orchestrator, number of masters and agents, +// and FQDNs of masters and agents. +// Parameters: +// resourceGroupName - the name of the resource group. 
+func (client ContainerServicesClient) ListByResourceGroup(ctx context.Context, resourceGroupName string) (result ContainerServiceListResultPage, err error) { + result.fn = client.listByResourceGroupNextResults + req, err := client.ListByResourceGroupPreparer(ctx, resourceGroupName) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.ContainerServicesClient", "ListByResourceGroup", nil, "Failure preparing request") + return + } + + resp, err := client.ListByResourceGroupSender(req) + if err != nil { + result.cslr.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "compute.ContainerServicesClient", "ListByResourceGroup", resp, "Failure sending request") + return + } + + result.cslr, err = client.ListByResourceGroupResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.ContainerServicesClient", "ListByResourceGroup", resp, "Failure responding to request") + } + + return +} + +// ListByResourceGroupPreparer prepares the ListByResourceGroup request. +func (client ContainerServicesClient) ListByResourceGroupPreparer(ctx context.Context, resourceGroupName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2017-01-31" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/containerServices", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListByResourceGroupSender sends the ListByResourceGroup request. 
The method will close the +// http.Response Body if it receives an error. +func (client ContainerServicesClient) ListByResourceGroupSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) +} + +// ListByResourceGroupResponder handles the response to the ListByResourceGroup request. The method always +// closes the http.Response Body. +func (client ContainerServicesClient) ListByResourceGroupResponder(resp *http.Response) (result ContainerServiceListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// listByResourceGroupNextResults retrieves the next set of results, if any. +func (client ContainerServicesClient) listByResourceGroupNextResults(lastResults ContainerServiceListResult) (result ContainerServiceListResult, err error) { + req, err := lastResults.containerServiceListResultPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "compute.ContainerServicesClient", "listByResourceGroupNextResults", nil, "Failure preparing next results request") + } + if req == nil { + return + } + resp, err := client.ListByResourceGroupSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "compute.ContainerServicesClient", "listByResourceGroupNextResults", resp, "Failure sending next results request") + } + result, err = client.ListByResourceGroupResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.ContainerServicesClient", "listByResourceGroupNextResults", resp, "Failure responding to next results request") + } + return +} + +// ListByResourceGroupComplete enumerates all values, automatically crossing page boundaries as required. 
+func (client ContainerServicesClient) ListByResourceGroupComplete(ctx context.Context, resourceGroupName string) (result ContainerServiceListResultIterator, err error) { + result.page, err = client.ListByResourceGroup(ctx, resourceGroupName) + return +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-04-01/compute/disks.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-03-01/compute/disks.go similarity index 95% rename from vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-04-01/compute/disks.go rename to vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-03-01/compute/disks.go index cdc570377d011..0819b76e937d9 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-04-01/compute/disks.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-03-01/compute/disks.go @@ -55,16 +55,8 @@ func (client DisksClient) CreateOrUpdate(ctx context.Context, resourceGroupName Chain: []validation.Constraint{{Target: "disk.DiskProperties.CreationData.ImageReference", Name: validation.Null, Rule: false, Chain: []validation.Constraint{{Target: "disk.DiskProperties.CreationData.ImageReference.ID", Name: validation.Null, Rule: true, Chain: nil}}}, }}, - {Target: "disk.DiskProperties.EncryptionSettings", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "disk.DiskProperties.EncryptionSettings.DiskEncryptionKey", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "disk.DiskProperties.EncryptionSettings.DiskEncryptionKey.SourceVault", Name: validation.Null, Rule: true, Chain: nil}, - {Target: "disk.DiskProperties.EncryptionSettings.DiskEncryptionKey.SecretURL", Name: validation.Null, Rule: true, Chain: nil}, - }}, - {Target: "disk.DiskProperties.EncryptionSettings.KeyEncryptionKey", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: 
"disk.DiskProperties.EncryptionSettings.KeyEncryptionKey.SourceVault", Name: validation.Null, Rule: true, Chain: nil}, - {Target: "disk.DiskProperties.EncryptionSettings.KeyEncryptionKey.KeyURL", Name: validation.Null, Rule: true, Chain: nil}, - }}, - }}, + {Target: "disk.DiskProperties.EncryptionSettingsCollection", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "disk.DiskProperties.EncryptionSettingsCollection.Enabled", Name: validation.Null, Rule: true, Chain: nil}}}, }}}}}); err != nil { return result, validation.NewError("compute.DisksClient", "CreateOrUpdate", err.Error()) } @@ -92,7 +84,7 @@ func (client DisksClient) CreateOrUpdatePreparer(ctx context.Context, resourceGr "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-04-01" + const APIVersion = "2018-09-30" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -167,7 +159,7 @@ func (client DisksClient) DeletePreparer(ctx context.Context, resourceGroupName "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-04-01" + const APIVersion = "2018-09-30" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -245,7 +237,7 @@ func (client DisksClient) GetPreparer(ctx context.Context, resourceGroupName str "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-04-01" + const APIVersion = "2018-09-30" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -315,7 +307,7 @@ func (client DisksClient) GrantAccessPreparer(ctx context.Context, resourceGroup "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-04-01" + const APIVersion = "2018-09-30" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -390,7 +382,7 @@ func (client DisksClient) ListPreparer(ctx context.Context) (*http.Request, erro "subscriptionId": autorest.Encode("path", 
client.SubscriptionID), } - const APIVersion = "2018-04-01" + const APIVersion = "2018-09-30" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -483,7 +475,7 @@ func (client DisksClient) ListByResourceGroupPreparer(ctx context.Context, resou "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-04-01" + const APIVersion = "2018-09-30" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -573,7 +565,7 @@ func (client DisksClient) RevokeAccessPreparer(ctx context.Context, resourceGrou "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-04-01" + const APIVersion = "2018-09-30" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -646,7 +638,7 @@ func (client DisksClient) UpdatePreparer(ctx context.Context, resourceGroupName "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-04-01" + const APIVersion = "2018-09-30" queryParameters := map[string]interface{}{ "api-version": APIVersion, } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-03-01/compute/galleries.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-03-01/compute/galleries.go new file mode 100644 index 0000000000000..1f02a5c03972a --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-03-01/compute/galleries.go @@ -0,0 +1,435 @@ +package compute + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "context" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "net/http" +) + +// GalleriesClient is the compute Client +type GalleriesClient struct { + BaseClient +} + +// NewGalleriesClient creates an instance of the GalleriesClient client. +func NewGalleriesClient(subscriptionID string) GalleriesClient { + return NewGalleriesClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewGalleriesClientWithBaseURI creates an instance of the GalleriesClient client. +func NewGalleriesClientWithBaseURI(baseURI string, subscriptionID string) GalleriesClient { + return GalleriesClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// CreateOrUpdate create or update a Shared Image Gallery. +// Parameters: +// resourceGroupName - the name of the resource group. +// galleryName - the name of the Shared Image Gallery. The allowed characters are alphabets and numbers with +// dots and periods allowed in the middle. The maximum length is 80 characters. +// gallery - parameters supplied to the create or update Shared Image Gallery operation. 
+func (client GalleriesClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, galleryName string, gallery Gallery) (result GalleriesCreateOrUpdateFuture, err error) { + req, err := client.CreateOrUpdatePreparer(ctx, resourceGroupName, galleryName, gallery) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.GalleriesClient", "CreateOrUpdate", nil, "Failure preparing request") + return + } + + result, err = client.CreateOrUpdateSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.GalleriesClient", "CreateOrUpdate", result.Response(), "Failure sending request") + return + } + + return +} + +// CreateOrUpdatePreparer prepares the CreateOrUpdate request. +func (client GalleriesClient) CreateOrUpdatePreparer(ctx context.Context, resourceGroupName string, galleryName string, gallery Gallery) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "galleryName": autorest.Encode("path", galleryName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2019-03-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}", pathParameters), + autorest.WithJSON(gallery), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the +// http.Response Body if it receives an error. 
+func (client GalleriesClient) CreateOrUpdateSender(req *http.Request) (future GalleriesCreateOrUpdateFuture, err error) { + var resp *http.Response + resp, err = autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) + if err != nil { + return + } + err = autorest.Respond(resp, azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated, http.StatusAccepted)) + if err != nil { + return + } + future.Future, err = azure.NewFutureFromResponse(resp) + return +} + +// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always +// closes the http.Response Body. +func (client GalleriesClient) CreateOrUpdateResponder(resp *http.Response) (result Gallery, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated, http.StatusAccepted), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Delete delete a Shared Image Gallery. +// Parameters: +// resourceGroupName - the name of the resource group. +// galleryName - the name of the Shared Image Gallery to be deleted. +func (client GalleriesClient) Delete(ctx context.Context, resourceGroupName string, galleryName string) (result GalleriesDeleteFuture, err error) { + req, err := client.DeletePreparer(ctx, resourceGroupName, galleryName) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.GalleriesClient", "Delete", nil, "Failure preparing request") + return + } + + result, err = client.DeleteSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.GalleriesClient", "Delete", result.Response(), "Failure sending request") + return + } + + return +} + +// DeletePreparer prepares the Delete request. 
+func (client GalleriesClient) DeletePreparer(ctx context.Context, resourceGroupName string, galleryName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "galleryName": autorest.Encode("path", galleryName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2019-03-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// DeleteSender sends the Delete request. The method will close the +// http.Response Body if it receives an error. +func (client GalleriesClient) DeleteSender(req *http.Request) (future GalleriesDeleteFuture, err error) { + var resp *http.Response + resp, err = autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) + if err != nil { + return + } + err = autorest.Respond(resp, azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent)) + if err != nil { + return + } + future.Future, err = azure.NewFutureFromResponse(resp) + return +} + +// DeleteResponder handles the response to the Delete request. The method always +// closes the http.Response Body. +func (client GalleriesClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent), + autorest.ByClosing()) + result.Response = resp + return +} + +// Get retrieves information about a Shared Image Gallery. 
+// Parameters: +// resourceGroupName - the name of the resource group. +// galleryName - the name of the Shared Image Gallery. +func (client GalleriesClient) Get(ctx context.Context, resourceGroupName string, galleryName string) (result Gallery, err error) { + req, err := client.GetPreparer(ctx, resourceGroupName, galleryName) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.GalleriesClient", "Get", nil, "Failure preparing request") + return + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "compute.GalleriesClient", "Get", resp, "Failure sending request") + return + } + + result, err = client.GetResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.GalleriesClient", "Get", resp, "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. +func (client GalleriesClient) GetPreparer(ctx context.Context, resourceGroupName string, galleryName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "galleryName": autorest.Encode("path", galleryName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2019-03-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. 
+func (client GalleriesClient) GetSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. +func (client GalleriesClient) GetResponder(resp *http.Response) (result Gallery, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// List list galleries under a subscription. +func (client GalleriesClient) List(ctx context.Context) (result GalleryListPage, err error) { + result.fn = client.listNextResults + req, err := client.ListPreparer(ctx) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.GalleriesClient", "List", nil, "Failure preparing request") + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.gl.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "compute.GalleriesClient", "List", resp, "Failure sending request") + return + } + + result.gl, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.GalleriesClient", "List", resp, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. 
+func (client GalleriesClient) ListPreparer(ctx context.Context) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2019-03-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Compute/galleries", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client GalleriesClient) ListSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. +func (client GalleriesClient) ListResponder(resp *http.Response) (result GalleryList, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// listNextResults retrieves the next set of results, if any. 
+func (client GalleriesClient) listNextResults(lastResults GalleryList) (result GalleryList, err error) { + req, err := lastResults.galleryListPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "compute.GalleriesClient", "listNextResults", nil, "Failure preparing next results request") + } + if req == nil { + return + } + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "compute.GalleriesClient", "listNextResults", resp, "Failure sending next results request") + } + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.GalleriesClient", "listNextResults", resp, "Failure responding to next results request") + } + return +} + +// ListComplete enumerates all values, automatically crossing page boundaries as required. +func (client GalleriesClient) ListComplete(ctx context.Context) (result GalleryListIterator, err error) { + result.page, err = client.List(ctx) + return +} + +// ListByResourceGroup list galleries under a resource group. +// Parameters: +// resourceGroupName - the name of the resource group. 
+func (client GalleriesClient) ListByResourceGroup(ctx context.Context, resourceGroupName string) (result GalleryListPage, err error) { + result.fn = client.listByResourceGroupNextResults + req, err := client.ListByResourceGroupPreparer(ctx, resourceGroupName) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.GalleriesClient", "ListByResourceGroup", nil, "Failure preparing request") + return + } + + resp, err := client.ListByResourceGroupSender(req) + if err != nil { + result.gl.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "compute.GalleriesClient", "ListByResourceGroup", resp, "Failure sending request") + return + } + + result.gl, err = client.ListByResourceGroupResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.GalleriesClient", "ListByResourceGroup", resp, "Failure responding to request") + } + + return +} + +// ListByResourceGroupPreparer prepares the ListByResourceGroup request. +func (client GalleriesClient) ListByResourceGroupPreparer(ctx context.Context, resourceGroupName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2019-03-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListByResourceGroupSender sends the ListByResourceGroup request. The method will close the +// http.Response Body if it receives an error. 
func (client GalleriesClient) ListByResourceGroupSender(req *http.Request) (*http.Response, error) {
	return autorest.SendWithSender(client, req,
		azure.DoRetryWithRegistration(client.Client))
}

// ListByResourceGroupResponder handles the response to the ListByResourceGroup request. The method always
// closes the http.Response Body.
func (client GalleriesClient) ListByResourceGroupResponder(resp *http.Response) (result GalleryList, err error) {
	err = autorest.Respond(
		resp,
		client.ByInspecting(),
		azure.WithErrorUnlessStatusCode(http.StatusOK),
		autorest.ByUnmarshallingJSON(&result),
		autorest.ByClosing())
	result.Response = autorest.Response{Response: resp}
	return
}

// listByResourceGroupNextResults retrieves the next set of results, if any.
func (client GalleriesClient) listByResourceGroupNextResults(lastResults GalleryList) (result GalleryList, err error) {
	req, err := lastResults.galleryListPreparer()
	if err != nil {
		return result, autorest.NewErrorWithError(err, "compute.GalleriesClient", "listByResourceGroupNextResults", nil, "Failure preparing next results request")
	}
	// Nil request + nil error means no further pages; end iteration with the
	// zero-value result (autorest pagination convention).
	if req == nil {
		return
	}
	resp, err := client.ListByResourceGroupSender(req)
	if err != nil {
		result.Response = autorest.Response{Response: resp}
		return result, autorest.NewErrorWithError(err, "compute.GalleriesClient", "listByResourceGroupNextResults", resp, "Failure sending next results request")
	}
	result, err = client.ListByResourceGroupResponder(resp)
	if err != nil {
		err = autorest.NewErrorWithError(err, "compute.GalleriesClient", "listByResourceGroupNextResults", resp, "Failure responding to next results request")
	}
	return
}

// ListByResourceGroupComplete enumerates all values, automatically crossing page boundaries as required.
func (client GalleriesClient) ListByResourceGroupComplete(ctx context.Context, resourceGroupName string) (result GalleryListIterator, err error) {
	result.page, err = client.ListByResourceGroup(ctx, resourceGroupName)
	return
}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-03-01/compute/galleryimages.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-03-01/compute/galleryimages.go
new file mode 100644
index 0000000000000..410d4e6b7372f
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-03-01/compute/galleryimages.go
@@ -0,0 +1,367 @@
package compute

// Copyright (c) Microsoft and contributors. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
//
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.

import (
	"context"
	"github.com/Azure/go-autorest/autorest"
	"github.com/Azure/go-autorest/autorest/azure"
	"github.com/Azure/go-autorest/autorest/validation"
	"net/http"
)

// GalleryImagesClient is the compute Client
type GalleryImagesClient struct {
	BaseClient
}

// NewGalleryImagesClient creates an instance of the GalleryImagesClient client.
func NewGalleryImagesClient(subscriptionID string) GalleryImagesClient {
	return NewGalleryImagesClientWithBaseURI(DefaultBaseURI, subscriptionID)
}

// NewGalleryImagesClientWithBaseURI creates an instance of the GalleryImagesClient client.
func NewGalleryImagesClientWithBaseURI(baseURI string, subscriptionID string) GalleryImagesClient {
	return GalleryImagesClient{NewWithBaseURI(baseURI, subscriptionID)}
}

// CreateOrUpdate create or update a gallery Image Definition.
// Parameters:
// resourceGroupName - the name of the resource group.
// galleryName - the name of the Shared Image Gallery in which the Image Definition is to be created.
// galleryImageName - the name of the gallery Image Definition to be created or updated. The allowed characters
// are alphabets and numbers with dots, dashes, and periods allowed in the middle. The maximum length is 80
// characters.
// galleryImage - parameters supplied to the create or update gallery image operation.
func (client GalleryImagesClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, galleryName string, galleryImageName string, galleryImage GalleryImage) (result GalleryImagesCreateOrUpdateFuture, err error) {
	// Client-side validation: Identifier (with Publisher/Offer/Sku) is required
	// whenever GalleryImageProperties is supplied; rejected before any HTTP call.
	if err := validation.Validate([]validation.Validation{
		{TargetValue: galleryImage,
			Constraints: []validation.Constraint{{Target: "galleryImage.GalleryImageProperties", Name: validation.Null, Rule: false,
				Chain: []validation.Constraint{{Target: "galleryImage.GalleryImageProperties.Identifier", Name: validation.Null, Rule: true,
					Chain: []validation.Constraint{{Target: "galleryImage.GalleryImageProperties.Identifier.Publisher", Name: validation.Null, Rule: true, Chain: nil},
						{Target: "galleryImage.GalleryImageProperties.Identifier.Offer", Name: validation.Null, Rule: true, Chain: nil},
						{Target: "galleryImage.GalleryImageProperties.Identifier.Sku", Name: validation.Null, Rule: true, Chain: nil},
					}},
				}}}}}); err != nil {
		return result, validation.NewError("compute.GalleryImagesClient", "CreateOrUpdate", err.Error())
	}

	req, err := client.CreateOrUpdatePreparer(ctx, resourceGroupName, galleryName, galleryImageName, galleryImage)
	if err != nil {
		err = autorest.NewErrorWithError(err, "compute.GalleryImagesClient", "CreateOrUpdate", nil, "Failure preparing request")
		return
	}

	// Long-running operation: the sender returns a Future; the caller waits for
	// completion and then calls CreateOrUpdateResponder.
	result, err = client.CreateOrUpdateSender(req)
	if err != nil {
		err = autorest.NewErrorWithError(err, "compute.GalleryImagesClient", "CreateOrUpdate", result.Response(), "Failure sending request")
		return
	}

	return
}

// CreateOrUpdatePreparer prepares the CreateOrUpdate request.
func (client GalleryImagesClient) CreateOrUpdatePreparer(ctx context.Context, resourceGroupName string, galleryName string, galleryImageName string, galleryImage GalleryImage) (*http.Request, error) {
	pathParameters := map[string]interface{}{
		"galleryImageName":  autorest.Encode("path", galleryImageName),
		"galleryName":       autorest.Encode("path", galleryName),
		"resourceGroupName": autorest.Encode("path", resourceGroupName),
		"subscriptionId":    autorest.Encode("path", client.SubscriptionID),
	}

	const APIVersion = "2019-03-01"
	queryParameters := map[string]interface{}{
		"api-version": APIVersion,
	}

	preparer := autorest.CreatePreparer(
		autorest.AsContentType("application/json; charset=utf-8"),
		autorest.AsPut(),
		autorest.WithBaseURL(client.BaseURI),
		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}/images/{galleryImageName}", pathParameters),
		autorest.WithJSON(galleryImage),
		autorest.WithQueryParameters(queryParameters))
	return preparer.Prepare((&http.Request{}).WithContext(ctx))
}

// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the
// http.Response Body if it receives an error.
func (client GalleryImagesClient) CreateOrUpdateSender(req *http.Request) (future GalleryImagesCreateOrUpdateFuture, err error) {
	var resp *http.Response
	resp, err = autorest.SendWithSender(client, req,
		azure.DoRetryWithRegistration(client.Client))
	if err != nil {
		return
	}
	// 200/201/202 are all valid initial responses for this long-running PUT;
	// anything else is turned into an error here.
	err = autorest.Respond(resp, azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated, http.StatusAccepted))
	if err != nil {
		return
	}
	// Wrap the initial response in a Future so callers can poll to completion.
	future.Future, err = azure.NewFutureFromResponse(resp)
	return
}

// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always
// closes the http.Response Body.
func (client GalleryImagesClient) CreateOrUpdateResponder(resp *http.Response) (result GalleryImage, err error) {
	err = autorest.Respond(
		resp,
		client.ByInspecting(),
		azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated, http.StatusAccepted),
		autorest.ByUnmarshallingJSON(&result),
		autorest.ByClosing())
	result.Response = autorest.Response{Response: resp}
	return
}

// Delete delete a gallery image.
// Parameters:
// resourceGroupName - the name of the resource group.
// galleryName - the name of the Shared Image Gallery in which the Image Definition is to be deleted.
// galleryImageName - the name of the gallery Image Definition to be deleted.
func (client GalleryImagesClient) Delete(ctx context.Context, resourceGroupName string, galleryName string, galleryImageName string) (result GalleryImagesDeleteFuture, err error) {
	req, err := client.DeletePreparer(ctx, resourceGroupName, galleryName, galleryImageName)
	if err != nil {
		err = autorest.NewErrorWithError(err, "compute.GalleryImagesClient", "Delete", nil, "Failure preparing request")
		return
	}

	// Long-running delete: result is a Future the caller must wait on.
	result, err = client.DeleteSender(req)
	if err != nil {
		err = autorest.NewErrorWithError(err, "compute.GalleryImagesClient", "Delete", result.Response(), "Failure sending request")
		return
	}

	return
}

// DeletePreparer prepares the Delete request.
func (client GalleryImagesClient) DeletePreparer(ctx context.Context, resourceGroupName string, galleryName string, galleryImageName string) (*http.Request, error) {
	pathParameters := map[string]interface{}{
		"galleryImageName":  autorest.Encode("path", galleryImageName),
		"galleryName":       autorest.Encode("path", galleryName),
		"resourceGroupName": autorest.Encode("path", resourceGroupName),
		"subscriptionId":    autorest.Encode("path", client.SubscriptionID),
	}

	const APIVersion = "2019-03-01"
	queryParameters := map[string]interface{}{
		"api-version": APIVersion,
	}

	preparer := autorest.CreatePreparer(
		autorest.AsDelete(),
		autorest.WithBaseURL(client.BaseURI),
		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}/images/{galleryImageName}", pathParameters),
		autorest.WithQueryParameters(queryParameters))
	return preparer.Prepare((&http.Request{}).WithContext(ctx))
}

// DeleteSender sends the Delete request. The method will close the
// http.Response Body if it receives an error.
func (client GalleryImagesClient) DeleteSender(req *http.Request) (future GalleryImagesDeleteFuture, err error) {
	var resp *http.Response
	resp, err = autorest.SendWithSender(client, req,
		azure.DoRetryWithRegistration(client.Client))
	if err != nil {
		return
	}
	// 200/202/204 are the accepted initial statuses for an async DELETE.
	err = autorest.Respond(resp, azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent))
	if err != nil {
		return
	}
	future.Future, err = azure.NewFutureFromResponse(resp)
	return
}

// DeleteResponder handles the response to the Delete request. The method always
// closes the http.Response Body.
func (client GalleryImagesClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) {
	err = autorest.Respond(
		resp,
		client.ByInspecting(),
		azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent),
		autorest.ByClosing())
	result.Response = resp
	return
}

// Get retrieves information about a gallery Image Definition.
// Parameters:
// resourceGroupName - the name of the resource group.
// galleryName - the name of the Shared Image Gallery from which the Image Definitions are to be retrieved.
// galleryImageName - the name of the gallery Image Definition to be retrieved.
func (client GalleryImagesClient) Get(ctx context.Context, resourceGroupName string, galleryName string, galleryImageName string) (result GalleryImage, err error) {
	req, err := client.GetPreparer(ctx, resourceGroupName, galleryName, galleryImageName)
	if err != nil {
		err = autorest.NewErrorWithError(err, "compute.GalleryImagesClient", "Get", nil, "Failure preparing request")
		return
	}

	resp, err := client.GetSender(req)
	if err != nil {
		// Attach the raw response even on failure so callers can inspect it.
		result.Response = autorest.Response{Response: resp}
		err = autorest.NewErrorWithError(err, "compute.GalleryImagesClient", "Get", resp, "Failure sending request")
		return
	}

	result, err = client.GetResponder(resp)
	if err != nil {
		err = autorest.NewErrorWithError(err, "compute.GalleryImagesClient", "Get", resp, "Failure responding to request")
	}

	return
}

// GetPreparer prepares the Get request.
func (client GalleryImagesClient) GetPreparer(ctx context.Context, resourceGroupName string, galleryName string, galleryImageName string) (*http.Request, error) {
	pathParameters := map[string]interface{}{
		"galleryImageName":  autorest.Encode("path", galleryImageName),
		"galleryName":       autorest.Encode("path", galleryName),
		"resourceGroupName": autorest.Encode("path", resourceGroupName),
		"subscriptionId":    autorest.Encode("path", client.SubscriptionID),
	}

	const APIVersion = "2019-03-01"
	queryParameters := map[string]interface{}{
		"api-version": APIVersion,
	}

	preparer := autorest.CreatePreparer(
		autorest.AsGet(),
		autorest.WithBaseURL(client.BaseURI),
		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}/images/{galleryImageName}", pathParameters),
		autorest.WithQueryParameters(queryParameters))
	return preparer.Prepare((&http.Request{}).WithContext(ctx))
}

// GetSender sends the Get request. The method will close the
// http.Response Body if it receives an error.
func (client GalleryImagesClient) GetSender(req *http.Request) (*http.Response, error) {
	return autorest.SendWithSender(client, req,
		azure.DoRetryWithRegistration(client.Client))
}

// GetResponder handles the response to the Get request. The method always
// closes the http.Response Body.
func (client GalleryImagesClient) GetResponder(resp *http.Response) (result GalleryImage, err error) {
	err = autorest.Respond(
		resp,
		client.ByInspecting(),
		azure.WithErrorUnlessStatusCode(http.StatusOK),
		autorest.ByUnmarshallingJSON(&result),
		autorest.ByClosing())
	result.Response = autorest.Response{Response: resp}
	return
}

// ListByGallery list gallery Image Definitions in a gallery.
// Parameters:
// resourceGroupName - the name of the resource group.
// galleryName - the name of the Shared Image Gallery from which Image Definitions are to be listed.
func (client GalleryImagesClient) ListByGallery(ctx context.Context, resourceGroupName string, galleryName string) (result GalleryImageListPage, err error) {
	// Hook up the page-advance function for the returned paged result.
	result.fn = client.listByGalleryNextResults
	req, err := client.ListByGalleryPreparer(ctx, resourceGroupName, galleryName)
	if err != nil {
		err = autorest.NewErrorWithError(err, "compute.GalleryImagesClient", "ListByGallery", nil, "Failure preparing request")
		return
	}

	resp, err := client.ListByGallerySender(req)
	if err != nil {
		result.gil.Response = autorest.Response{Response: resp}
		err = autorest.NewErrorWithError(err, "compute.GalleryImagesClient", "ListByGallery", resp, "Failure sending request")
		return
	}

	result.gil, err = client.ListByGalleryResponder(resp)
	if err != nil {
		err = autorest.NewErrorWithError(err, "compute.GalleryImagesClient", "ListByGallery", resp, "Failure responding to request")
	}

	return
}

// ListByGalleryPreparer prepares the ListByGallery request.
func (client GalleryImagesClient) ListByGalleryPreparer(ctx context.Context, resourceGroupName string, galleryName string) (*http.Request, error) {
	pathParameters := map[string]interface{}{
		"galleryName":       autorest.Encode("path", galleryName),
		"resourceGroupName": autorest.Encode("path", resourceGroupName),
		"subscriptionId":    autorest.Encode("path", client.SubscriptionID),
	}

	const APIVersion = "2019-03-01"
	queryParameters := map[string]interface{}{
		"api-version": APIVersion,
	}

	preparer := autorest.CreatePreparer(
		autorest.AsGet(),
		autorest.WithBaseURL(client.BaseURI),
		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}/images", pathParameters),
		autorest.WithQueryParameters(queryParameters))
	return preparer.Prepare((&http.Request{}).WithContext(ctx))
}

// ListByGallerySender sends the ListByGallery request. The method will close the
// http.Response Body if it receives an error.
func (client GalleryImagesClient) ListByGallerySender(req *http.Request) (*http.Response, error) {
	return autorest.SendWithSender(client, req,
		azure.DoRetryWithRegistration(client.Client))
}

// ListByGalleryResponder handles the response to the ListByGallery request. The method always
// closes the http.Response Body.
func (client GalleryImagesClient) ListByGalleryResponder(resp *http.Response) (result GalleryImageList, err error) {
	err = autorest.Respond(
		resp,
		client.ByInspecting(),
		azure.WithErrorUnlessStatusCode(http.StatusOK),
		autorest.ByUnmarshallingJSON(&result),
		autorest.ByClosing())
	result.Response = autorest.Response{Response: resp}
	return
}

// listByGalleryNextResults retrieves the next set of results, if any.
func (client GalleryImagesClient) listByGalleryNextResults(lastResults GalleryImageList) (result GalleryImageList, err error) {
	req, err := lastResults.galleryImageListPreparer()
	if err != nil {
		return result, autorest.NewErrorWithError(err, "compute.GalleryImagesClient", "listByGalleryNextResults", nil, "Failure preparing next results request")
	}
	// Nil request + nil error means there is no next page; stop iterating.
	if req == nil {
		return
	}
	resp, err := client.ListByGallerySender(req)
	if err != nil {
		result.Response = autorest.Response{Response: resp}
		return result, autorest.NewErrorWithError(err, "compute.GalleryImagesClient", "listByGalleryNextResults", resp, "Failure sending next results request")
	}
	result, err = client.ListByGalleryResponder(resp)
	if err != nil {
		err = autorest.NewErrorWithError(err, "compute.GalleryImagesClient", "listByGalleryNextResults", resp, "Failure responding to next results request")
	}
	return
}

// ListByGalleryComplete enumerates all values, automatically crossing page boundaries as required.
func (client GalleryImagesClient) ListByGalleryComplete(ctx context.Context, resourceGroupName string, galleryName string) (result GalleryImageListIterator, err error) {
	result.page, err = client.ListByGallery(ctx, resourceGroupName, galleryName)
	return
}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-03-01/compute/galleryimageversions.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-03-01/compute/galleryimageversions.go
new file mode 100644
index 0000000000000..259cfa3ab77fd
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-03-01/compute/galleryimageversions.go
@@ -0,0 +1,375 @@
package compute

// Copyright (c) Microsoft and contributors. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
//
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.

import (
	"context"
	"github.com/Azure/go-autorest/autorest"
	"github.com/Azure/go-autorest/autorest/azure"
	"github.com/Azure/go-autorest/autorest/validation"
	"net/http"
)

// GalleryImageVersionsClient is the compute Client
type GalleryImageVersionsClient struct {
	BaseClient
}

// NewGalleryImageVersionsClient creates an instance of the GalleryImageVersionsClient client.
func NewGalleryImageVersionsClient(subscriptionID string) GalleryImageVersionsClient {
	return NewGalleryImageVersionsClientWithBaseURI(DefaultBaseURI, subscriptionID)
}

// NewGalleryImageVersionsClientWithBaseURI creates an instance of the GalleryImageVersionsClient client.
func NewGalleryImageVersionsClientWithBaseURI(baseURI string, subscriptionID string) GalleryImageVersionsClient {
	return GalleryImageVersionsClient{NewWithBaseURI(baseURI, subscriptionID)}
}

// CreateOrUpdate create or update a gallery Image Version.
// Parameters:
// resourceGroupName - the name of the resource group.
// galleryName - the name of the Shared Image Gallery in which the Image Definition resides.
// galleryImageName - the name of the gallery Image Definition in which the Image Version is to be created.
// galleryImageVersionName - the name of the gallery Image Version to be created. Needs to follow semantic
// version name pattern: The allowed characters are digit and period. Digits must be within the range of a
// 32-bit integer. Format: <MajorVersion>.<MinorVersion>.<Patch>
// galleryImageVersion - parameters supplied to the create or update gallery Image Version operation.
func (client GalleryImageVersionsClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, galleryName string, galleryImageName string, galleryImageVersionName string, galleryImageVersion GalleryImageVersion) (result GalleryImageVersionsCreateOrUpdateFuture, err error) {
	// Client-side validation: PublishingProfile is required whenever
	// GalleryImageVersionProperties is supplied; rejected before any HTTP call.
	if err := validation.Validate([]validation.Validation{
		{TargetValue: galleryImageVersion,
			Constraints: []validation.Constraint{{Target: "galleryImageVersion.GalleryImageVersionProperties", Name: validation.Null, Rule: false,
				Chain: []validation.Constraint{{Target: "galleryImageVersion.GalleryImageVersionProperties.PublishingProfile", Name: validation.Null, Rule: true, Chain: nil}}}}}}); err != nil {
		return result, validation.NewError("compute.GalleryImageVersionsClient", "CreateOrUpdate", err.Error())
	}

	req, err := client.CreateOrUpdatePreparer(ctx, resourceGroupName, galleryName, galleryImageName, galleryImageVersionName, galleryImageVersion)
	if err != nil {
		err = autorest.NewErrorWithError(err, "compute.GalleryImageVersionsClient", "CreateOrUpdate", nil, "Failure preparing request")
		return
	}

	// Long-running operation: the sender returns a Future; the caller waits for
	// completion and then calls CreateOrUpdateResponder.
	result, err = client.CreateOrUpdateSender(req)
	if err != nil {
		err = autorest.NewErrorWithError(err, "compute.GalleryImageVersionsClient", "CreateOrUpdate", result.Response(), "Failure sending request")
		return
	}

	return
}

// CreateOrUpdatePreparer prepares the CreateOrUpdate request.
func (client GalleryImageVersionsClient) CreateOrUpdatePreparer(ctx context.Context, resourceGroupName string, galleryName string, galleryImageName string, galleryImageVersionName string, galleryImageVersion GalleryImageVersion) (*http.Request, error) {
	pathParameters := map[string]interface{}{
		"galleryImageName":        autorest.Encode("path", galleryImageName),
		"galleryImageVersionName": autorest.Encode("path", galleryImageVersionName),
		"galleryName":             autorest.Encode("path", galleryName),
		"resourceGroupName":       autorest.Encode("path", resourceGroupName),
		"subscriptionId":          autorest.Encode("path", client.SubscriptionID),
	}

	const APIVersion = "2019-03-01"
	queryParameters := map[string]interface{}{
		"api-version": APIVersion,
	}

	preparer := autorest.CreatePreparer(
		autorest.AsContentType("application/json; charset=utf-8"),
		autorest.AsPut(),
		autorest.WithBaseURL(client.BaseURI),
		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}/images/{galleryImageName}/versions/{galleryImageVersionName}", pathParameters),
		autorest.WithJSON(galleryImageVersion),
		autorest.WithQueryParameters(queryParameters))
	return preparer.Prepare((&http.Request{}).WithContext(ctx))
}

// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the
// http.Response Body if it receives an error.
func (client GalleryImageVersionsClient) CreateOrUpdateSender(req *http.Request) (future GalleryImageVersionsCreateOrUpdateFuture, err error) {
	var resp *http.Response
	resp, err = autorest.SendWithSender(client, req,
		azure.DoRetryWithRegistration(client.Client))
	if err != nil {
		return
	}
	// 200/201/202 are valid initial responses for this long-running PUT.
	err = autorest.Respond(resp, azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated, http.StatusAccepted))
	if err != nil {
		return
	}
	// Wrap the initial response in a Future so callers can poll to completion.
	future.Future, err = azure.NewFutureFromResponse(resp)
	return
}

// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always
// closes the http.Response Body.
func (client GalleryImageVersionsClient) CreateOrUpdateResponder(resp *http.Response) (result GalleryImageVersion, err error) {
	err = autorest.Respond(
		resp,
		client.ByInspecting(),
		azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated, http.StatusAccepted),
		autorest.ByUnmarshallingJSON(&result),
		autorest.ByClosing())
	result.Response = autorest.Response{Response: resp}
	return
}

// Delete delete a gallery Image Version.
// Parameters:
// resourceGroupName - the name of the resource group.
// galleryName - the name of the Shared Image Gallery in which the Image Definition resides.
// galleryImageName - the name of the gallery Image Definition in which the Image Version resides.
// galleryImageVersionName - the name of the gallery Image Version to be deleted.
func (client GalleryImageVersionsClient) Delete(ctx context.Context, resourceGroupName string, galleryName string, galleryImageName string, galleryImageVersionName string) (result GalleryImageVersionsDeleteFuture, err error) {
	req, err := client.DeletePreparer(ctx, resourceGroupName, galleryName, galleryImageName, galleryImageVersionName)
	if err != nil {
		err = autorest.NewErrorWithError(err, "compute.GalleryImageVersionsClient", "Delete", nil, "Failure preparing request")
		return
	}

	// Long-running delete: result is a Future the caller must wait on.
	result, err = client.DeleteSender(req)
	if err != nil {
		err = autorest.NewErrorWithError(err, "compute.GalleryImageVersionsClient", "Delete", result.Response(), "Failure sending request")
		return
	}

	return
}

// DeletePreparer prepares the Delete request.
func (client GalleryImageVersionsClient) DeletePreparer(ctx context.Context, resourceGroupName string, galleryName string, galleryImageName string, galleryImageVersionName string) (*http.Request, error) {
	pathParameters := map[string]interface{}{
		"galleryImageName":        autorest.Encode("path", galleryImageName),
		"galleryImageVersionName": autorest.Encode("path", galleryImageVersionName),
		"galleryName":             autorest.Encode("path", galleryName),
		"resourceGroupName":       autorest.Encode("path", resourceGroupName),
		"subscriptionId":          autorest.Encode("path", client.SubscriptionID),
	}

	const APIVersion = "2019-03-01"
	queryParameters := map[string]interface{}{
		"api-version": APIVersion,
	}

	preparer := autorest.CreatePreparer(
		autorest.AsDelete(),
		autorest.WithBaseURL(client.BaseURI),
		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}/images/{galleryImageName}/versions/{galleryImageVersionName}", pathParameters),
		autorest.WithQueryParameters(queryParameters))
	return preparer.Prepare((&http.Request{}).WithContext(ctx))
}

// DeleteSender sends the Delete request. The method will close the
// http.Response Body if it receives an error.
func (client GalleryImageVersionsClient) DeleteSender(req *http.Request) (future GalleryImageVersionsDeleteFuture, err error) {
	var resp *http.Response
	resp, err = autorest.SendWithSender(client, req,
		azure.DoRetryWithRegistration(client.Client))
	if err != nil {
		return
	}
	// 200/202/204 are the accepted initial statuses for an async DELETE.
	err = autorest.Respond(resp, azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent))
	if err != nil {
		return
	}
	future.Future, err = azure.NewFutureFromResponse(resp)
	return
}

// DeleteResponder handles the response to the Delete request. The method always
// closes the http.Response Body.
func (client GalleryImageVersionsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) {
	err = autorest.Respond(
		resp,
		client.ByInspecting(),
		azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent),
		autorest.ByClosing())
	result.Response = resp
	return
}

// Get retrieves information about a gallery Image Version.
// Parameters:
// resourceGroupName - the name of the resource group.
// galleryName - the name of the Shared Image Gallery in which the Image Definition resides.
// galleryImageName - the name of the gallery Image Definition in which the Image Version resides.
// galleryImageVersionName - the name of the gallery Image Version to be retrieved.
// expand - the expand expression to apply on the operation.
func (client GalleryImageVersionsClient) Get(ctx context.Context, resourceGroupName string, galleryName string, galleryImageName string, galleryImageVersionName string, expand ReplicationStatusTypes) (result GalleryImageVersion, err error) {
	req, err := client.GetPreparer(ctx, resourceGroupName, galleryName, galleryImageName, galleryImageVersionName, expand)
	if err != nil {
		err = autorest.NewErrorWithError(err, "compute.GalleryImageVersionsClient", "Get", nil, "Failure preparing request")
		return
	}

	resp, err := client.GetSender(req)
	if err != nil {
		// Attach the raw response even on failure so callers can inspect it.
		result.Response = autorest.Response{Response: resp}
		err = autorest.NewErrorWithError(err, "compute.GalleryImageVersionsClient", "Get", resp, "Failure sending request")
		return
	}

	result, err = client.GetResponder(resp)
	if err != nil {
		err = autorest.NewErrorWithError(err, "compute.GalleryImageVersionsClient", "Get", resp, "Failure responding to request")
	}

	return
}

// GetPreparer prepares the Get request.
func (client GalleryImageVersionsClient) GetPreparer(ctx context.Context, resourceGroupName string, galleryName string, galleryImageName string, galleryImageVersionName string, expand ReplicationStatusTypes) (*http.Request, error) {
	pathParameters := map[string]interface{}{
		"galleryImageName":        autorest.Encode("path", galleryImageName),
		"galleryImageVersionName": autorest.Encode("path", galleryImageVersionName),
		"galleryName":             autorest.Encode("path", galleryName),
		"resourceGroupName":       autorest.Encode("path", resourceGroupName),
		"subscriptionId":          autorest.Encode("path", client.SubscriptionID),
	}

	const APIVersion = "2019-03-01"
	queryParameters := map[string]interface{}{
		"api-version": APIVersion,
	}
	// $expand is optional: only sent when the caller supplied a non-empty value.
	if len(string(expand)) > 0 {
		queryParameters["$expand"] = autorest.Encode("query", expand)
	}

	preparer := autorest.CreatePreparer(
		autorest.AsGet(),
		autorest.WithBaseURL(client.BaseURI),
		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}/images/{galleryImageName}/versions/{galleryImageVersionName}", pathParameters),
		autorest.WithQueryParameters(queryParameters))
	return preparer.Prepare((&http.Request{}).WithContext(ctx))
}

// GetSender sends the Get request. The method will close the
// http.Response Body if it receives an error.
func (client GalleryImageVersionsClient) GetSender(req *http.Request) (*http.Response, error) {
	return autorest.SendWithSender(client, req,
		azure.DoRetryWithRegistration(client.Client))
}

// GetResponder handles the response to the Get request. The method always
// closes the http.Response Body.
+func (client GalleryImageVersionsClient) GetResponder(resp *http.Response) (result GalleryImageVersion, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListByGalleryImage list gallery Image Versions in a gallery Image Definition. +// Parameters: +// resourceGroupName - the name of the resource group. +// galleryName - the name of the Shared Image Gallery in which the Image Definition resides. +// galleryImageName - the name of the Shared Image Gallery Image Definition from which the Image Versions are +// to be listed. +func (client GalleryImageVersionsClient) ListByGalleryImage(ctx context.Context, resourceGroupName string, galleryName string, galleryImageName string) (result GalleryImageVersionListPage, err error) { + result.fn = client.listByGalleryImageNextResults + req, err := client.ListByGalleryImagePreparer(ctx, resourceGroupName, galleryName, galleryImageName) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.GalleryImageVersionsClient", "ListByGalleryImage", nil, "Failure preparing request") + return + } + + resp, err := client.ListByGalleryImageSender(req) + if err != nil { + result.givl.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "compute.GalleryImageVersionsClient", "ListByGalleryImage", resp, "Failure sending request") + return + } + + result.givl, err = client.ListByGalleryImageResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.GalleryImageVersionsClient", "ListByGalleryImage", resp, "Failure responding to request") + } + + return +} + +// ListByGalleryImagePreparer prepares the ListByGalleryImage request. 
+func (client GalleryImageVersionsClient) ListByGalleryImagePreparer(ctx context.Context, resourceGroupName string, galleryName string, galleryImageName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "galleryImageName": autorest.Encode("path", galleryImageName), + "galleryName": autorest.Encode("path", galleryName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2019-03-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}/images/{galleryImageName}/versions", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListByGalleryImageSender sends the ListByGalleryImage request. The method will close the +// http.Response Body if it receives an error. +func (client GalleryImageVersionsClient) ListByGalleryImageSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) +} + +// ListByGalleryImageResponder handles the response to the ListByGalleryImage request. The method always +// closes the http.Response Body. +func (client GalleryImageVersionsClient) ListByGalleryImageResponder(resp *http.Response) (result GalleryImageVersionList, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// listByGalleryImageNextResults retrieves the next set of results, if any. 
+func (client GalleryImageVersionsClient) listByGalleryImageNextResults(lastResults GalleryImageVersionList) (result GalleryImageVersionList, err error) { + req, err := lastResults.galleryImageVersionListPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "compute.GalleryImageVersionsClient", "listByGalleryImageNextResults", nil, "Failure preparing next results request") + } + if req == nil { + return + } + resp, err := client.ListByGalleryImageSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "compute.GalleryImageVersionsClient", "listByGalleryImageNextResults", resp, "Failure sending next results request") + } + result, err = client.ListByGalleryImageResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.GalleryImageVersionsClient", "listByGalleryImageNextResults", resp, "Failure responding to next results request") + } + return +} + +// ListByGalleryImageComplete enumerates all values, automatically crossing page boundaries as required. 
+func (client GalleryImageVersionsClient) ListByGalleryImageComplete(ctx context.Context, resourceGroupName string, galleryName string, galleryImageName string) (result GalleryImageVersionListIterator, err error) { + result.page, err = client.ListByGalleryImage(ctx, resourceGroupName, galleryName, galleryImageName) + return +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-04-01/compute/images.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-03-01/compute/images.go similarity index 99% rename from vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-04-01/compute/images.go rename to vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-03-01/compute/images.go index f468d6fd0c3f8..ec17c277b5fde 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-04-01/compute/images.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-03-01/compute/images.go @@ -68,7 +68,7 @@ func (client ImagesClient) CreateOrUpdatePreparer(ctx context.Context, resourceG "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-04-01" + const APIVersion = "2019-03-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -141,7 +141,7 @@ func (client ImagesClient) DeletePreparer(ctx context.Context, resourceGroupName "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-04-01" + const APIVersion = "2019-03-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -218,7 +218,7 @@ func (client ImagesClient) GetPreparer(ctx context.Context, resourceGroupName st "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-04-01" + const APIVersion = "2019-03-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -285,7 +285,7 @@ func (client ImagesClient) ListPreparer(ctx context.Context) 
(*http.Request, err "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-04-01" + const APIVersion = "2019-03-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -378,7 +378,7 @@ func (client ImagesClient) ListByResourceGroupPreparer(ctx context.Context, reso "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-04-01" + const APIVersion = "2019-03-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -467,7 +467,7 @@ func (client ImagesClient) UpdatePreparer(ctx context.Context, resourceGroupName "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-04-01" + const APIVersion = "2019-03-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-04-01/compute/loganalytics.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-03-01/compute/loganalytics.go similarity index 99% rename from vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-04-01/compute/loganalytics.go rename to vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-03-01/compute/loganalytics.go index e958f84b10033..cde3d08ad677e 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-04-01/compute/loganalytics.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-03-01/compute/loganalytics.go @@ -74,7 +74,7 @@ func (client LogAnalyticsClient) ExportRequestRateByIntervalPreparer(ctx context "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-04-01" + const APIVersion = "2019-03-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -153,7 +153,7 @@ func (client LogAnalyticsClient) ExportThrottledRequestsPreparer(ctx context.Con "subscriptionId": autorest.Encode("path", 
client.SubscriptionID), } - const APIVersion = "2018-04-01" + const APIVersion = "2019-03-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-04-01/compute/models.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-03-01/compute/models.go similarity index 69% rename from vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-04-01/compute/models.go rename to vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-03-01/compute/models.go index 166790d037adf..cdb0aea0e68a9 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-04-01/compute/models.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-03-01/compute/models.go @@ -34,11 +34,47 @@ const ( None AccessLevel = "None" // Read ... Read AccessLevel = "Read" + // Write ... + Write AccessLevel = "Write" ) // PossibleAccessLevelValues returns an array of possible values for the AccessLevel const type. func PossibleAccessLevelValues() []AccessLevel { - return []AccessLevel{None, Read} + return []AccessLevel{None, Read, Write} +} + +// AggregatedReplicationState enumerates the values for aggregated replication state. +type AggregatedReplicationState string + +const ( + // Completed ... + Completed AggregatedReplicationState = "Completed" + // Failed ... + Failed AggregatedReplicationState = "Failed" + // InProgress ... + InProgress AggregatedReplicationState = "InProgress" + // Unknown ... + Unknown AggregatedReplicationState = "Unknown" +) + +// PossibleAggregatedReplicationStateValues returns an array of possible values for the AggregatedReplicationState const type. +func PossibleAggregatedReplicationStateValues() []AggregatedReplicationState { + return []AggregatedReplicationState{Completed, Failed, InProgress, Unknown} +} + +// AvailabilitySetSkuTypes enumerates the values for availability set sku types. 
+type AvailabilitySetSkuTypes string + +const ( + // Aligned ... + Aligned AvailabilitySetSkuTypes = "Aligned" + // Classic ... + Classic AvailabilitySetSkuTypes = "Classic" +) + +// PossibleAvailabilitySetSkuTypesValues returns an array of possible values for the AvailabilitySetSkuTypes const type. +func PossibleAvailabilitySetSkuTypesValues() []AvailabilitySetSkuTypes { + return []AvailabilitySetSkuTypes{Aligned, Classic} } // CachingTypes enumerates the values for caching types. @@ -71,6 +107,143 @@ func PossibleComponentNamesValues() []ComponentNames { return []ComponentNames{MicrosoftWindowsShellSetup} } +// ContainerServiceOrchestratorTypes enumerates the values for container service orchestrator types. +type ContainerServiceOrchestratorTypes string + +const ( + // Custom ... + Custom ContainerServiceOrchestratorTypes = "Custom" + // DCOS ... + DCOS ContainerServiceOrchestratorTypes = "DCOS" + // Kubernetes ... + Kubernetes ContainerServiceOrchestratorTypes = "Kubernetes" + // Swarm ... + Swarm ContainerServiceOrchestratorTypes = "Swarm" +) + +// PossibleContainerServiceOrchestratorTypesValues returns an array of possible values for the ContainerServiceOrchestratorTypes const type. +func PossibleContainerServiceOrchestratorTypesValues() []ContainerServiceOrchestratorTypes { + return []ContainerServiceOrchestratorTypes{Custom, DCOS, Kubernetes, Swarm} +} + +// ContainerServiceVMSizeTypes enumerates the values for container service vm size types. +type ContainerServiceVMSizeTypes string + +const ( + // StandardA0 ... + StandardA0 ContainerServiceVMSizeTypes = "Standard_A0" + // StandardA1 ... + StandardA1 ContainerServiceVMSizeTypes = "Standard_A1" + // StandardA10 ... + StandardA10 ContainerServiceVMSizeTypes = "Standard_A10" + // StandardA11 ... + StandardA11 ContainerServiceVMSizeTypes = "Standard_A11" + // StandardA2 ... + StandardA2 ContainerServiceVMSizeTypes = "Standard_A2" + // StandardA3 ... 
+ StandardA3 ContainerServiceVMSizeTypes = "Standard_A3" + // StandardA4 ... + StandardA4 ContainerServiceVMSizeTypes = "Standard_A4" + // StandardA5 ... + StandardA5 ContainerServiceVMSizeTypes = "Standard_A5" + // StandardA6 ... + StandardA6 ContainerServiceVMSizeTypes = "Standard_A6" + // StandardA7 ... + StandardA7 ContainerServiceVMSizeTypes = "Standard_A7" + // StandardA8 ... + StandardA8 ContainerServiceVMSizeTypes = "Standard_A8" + // StandardA9 ... + StandardA9 ContainerServiceVMSizeTypes = "Standard_A9" + // StandardD1 ... + StandardD1 ContainerServiceVMSizeTypes = "Standard_D1" + // StandardD11 ... + StandardD11 ContainerServiceVMSizeTypes = "Standard_D11" + // StandardD11V2 ... + StandardD11V2 ContainerServiceVMSizeTypes = "Standard_D11_v2" + // StandardD12 ... + StandardD12 ContainerServiceVMSizeTypes = "Standard_D12" + // StandardD12V2 ... + StandardD12V2 ContainerServiceVMSizeTypes = "Standard_D12_v2" + // StandardD13 ... + StandardD13 ContainerServiceVMSizeTypes = "Standard_D13" + // StandardD13V2 ... + StandardD13V2 ContainerServiceVMSizeTypes = "Standard_D13_v2" + // StandardD14 ... + StandardD14 ContainerServiceVMSizeTypes = "Standard_D14" + // StandardD14V2 ... + StandardD14V2 ContainerServiceVMSizeTypes = "Standard_D14_v2" + // StandardD1V2 ... + StandardD1V2 ContainerServiceVMSizeTypes = "Standard_D1_v2" + // StandardD2 ... + StandardD2 ContainerServiceVMSizeTypes = "Standard_D2" + // StandardD2V2 ... + StandardD2V2 ContainerServiceVMSizeTypes = "Standard_D2_v2" + // StandardD3 ... + StandardD3 ContainerServiceVMSizeTypes = "Standard_D3" + // StandardD3V2 ... + StandardD3V2 ContainerServiceVMSizeTypes = "Standard_D3_v2" + // StandardD4 ... + StandardD4 ContainerServiceVMSizeTypes = "Standard_D4" + // StandardD4V2 ... + StandardD4V2 ContainerServiceVMSizeTypes = "Standard_D4_v2" + // StandardD5V2 ... + StandardD5V2 ContainerServiceVMSizeTypes = "Standard_D5_v2" + // StandardDS1 ... 
+ StandardDS1 ContainerServiceVMSizeTypes = "Standard_DS1" + // StandardDS11 ... + StandardDS11 ContainerServiceVMSizeTypes = "Standard_DS11" + // StandardDS12 ... + StandardDS12 ContainerServiceVMSizeTypes = "Standard_DS12" + // StandardDS13 ... + StandardDS13 ContainerServiceVMSizeTypes = "Standard_DS13" + // StandardDS14 ... + StandardDS14 ContainerServiceVMSizeTypes = "Standard_DS14" + // StandardDS2 ... + StandardDS2 ContainerServiceVMSizeTypes = "Standard_DS2" + // StandardDS3 ... + StandardDS3 ContainerServiceVMSizeTypes = "Standard_DS3" + // StandardDS4 ... + StandardDS4 ContainerServiceVMSizeTypes = "Standard_DS4" + // StandardG1 ... + StandardG1 ContainerServiceVMSizeTypes = "Standard_G1" + // StandardG2 ... + StandardG2 ContainerServiceVMSizeTypes = "Standard_G2" + // StandardG3 ... + StandardG3 ContainerServiceVMSizeTypes = "Standard_G3" + // StandardG4 ... + StandardG4 ContainerServiceVMSizeTypes = "Standard_G4" + // StandardG5 ... + StandardG5 ContainerServiceVMSizeTypes = "Standard_G5" + // StandardGS1 ... + StandardGS1 ContainerServiceVMSizeTypes = "Standard_GS1" + // StandardGS2 ... + StandardGS2 ContainerServiceVMSizeTypes = "Standard_GS2" + // StandardGS3 ... + StandardGS3 ContainerServiceVMSizeTypes = "Standard_GS3" + // StandardGS4 ... + StandardGS4 ContainerServiceVMSizeTypes = "Standard_GS4" + // StandardGS5 ... + StandardGS5 ContainerServiceVMSizeTypes = "Standard_GS5" +) + +// PossibleContainerServiceVMSizeTypesValues returns an array of possible values for the ContainerServiceVMSizeTypes const type. 
+func PossibleContainerServiceVMSizeTypesValues() []ContainerServiceVMSizeTypes { + return []ContainerServiceVMSizeTypes{StandardA0, StandardA1, StandardA10, StandardA11, StandardA2, StandardA3, StandardA4, StandardA5, StandardA6, StandardA7, StandardA8, StandardA9, StandardD1, StandardD11, StandardD11V2, StandardD12, StandardD12V2, StandardD13, StandardD13V2, StandardD14, StandardD14V2, StandardD1V2, StandardD2, StandardD2V2, StandardD3, StandardD3V2, StandardD4, StandardD4V2, StandardD5V2, StandardDS1, StandardDS11, StandardDS12, StandardDS13, StandardDS14, StandardDS2, StandardDS3, StandardDS4, StandardG1, StandardG2, StandardG3, StandardG4, StandardG5, StandardGS1, StandardGS2, StandardGS3, StandardGS4, StandardGS5} +} + +// DiffDiskOptions enumerates the values for diff disk options. +type DiffDiskOptions string + +const ( + // Local ... + Local DiffDiskOptions = "Local" +) + +// PossibleDiffDiskOptionsValues returns an array of possible values for the DiffDiskOptions const type. +func PossibleDiffDiskOptionsValues() []DiffDiskOptions { + return []DiffDiskOptions{Local} +} + // DiskCreateOption enumerates the values for disk create option. type DiskCreateOption string @@ -87,11 +260,13 @@ const ( Import DiskCreateOption = "Import" // Restore ... Restore DiskCreateOption = "Restore" + // Upload ... + Upload DiskCreateOption = "Upload" ) // PossibleDiskCreateOptionValues returns an array of possible values for the DiskCreateOption const type. func PossibleDiskCreateOptionValues() []DiskCreateOption { - return []DiskCreateOption{Attach, Copy, Empty, FromImage, Import, Restore} + return []DiskCreateOption{Attach, Copy, Empty, FromImage, Import, Restore, Upload} } // DiskCreateOptionTypes enumerates the values for disk create option types. 
@@ -111,6 +286,80 @@ func PossibleDiskCreateOptionTypesValues() []DiskCreateOptionTypes { return []DiskCreateOptionTypes{DiskCreateOptionTypesAttach, DiskCreateOptionTypesEmpty, DiskCreateOptionTypesFromImage} } +// DiskState enumerates the values for disk state. +type DiskState string + +const ( + // ActiveSAS ... + ActiveSAS DiskState = "ActiveSAS" + // ActiveUpload ... + ActiveUpload DiskState = "ActiveUpload" + // Attached ... + Attached DiskState = "Attached" + // ReadyToUpload ... + ReadyToUpload DiskState = "ReadyToUpload" + // Reserved ... + Reserved DiskState = "Reserved" + // Unattached ... + Unattached DiskState = "Unattached" +) + +// PossibleDiskStateValues returns an array of possible values for the DiskState const type. +func PossibleDiskStateValues() []DiskState { + return []DiskState{ActiveSAS, ActiveUpload, Attached, ReadyToUpload, Reserved, Unattached} +} + +// DiskStorageAccountTypes enumerates the values for disk storage account types. +type DiskStorageAccountTypes string + +const ( + // PremiumLRS ... + PremiumLRS DiskStorageAccountTypes = "Premium_LRS" + // StandardLRS ... + StandardLRS DiskStorageAccountTypes = "Standard_LRS" + // StandardSSDLRS ... + StandardSSDLRS DiskStorageAccountTypes = "StandardSSD_LRS" + // UltraSSDLRS ... + UltraSSDLRS DiskStorageAccountTypes = "UltraSSD_LRS" +) + +// PossibleDiskStorageAccountTypesValues returns an array of possible values for the DiskStorageAccountTypes const type. +func PossibleDiskStorageAccountTypesValues() []DiskStorageAccountTypes { + return []DiskStorageAccountTypes{PremiumLRS, StandardLRS, StandardSSDLRS, UltraSSDLRS} +} + +// HostCaching enumerates the values for host caching. +type HostCaching string + +const ( + // HostCachingNone ... + HostCachingNone HostCaching = "None" + // HostCachingReadOnly ... + HostCachingReadOnly HostCaching = "ReadOnly" + // HostCachingReadWrite ... 
+ HostCachingReadWrite HostCaching = "ReadWrite" +) + +// PossibleHostCachingValues returns an array of possible values for the HostCaching const type. +func PossibleHostCachingValues() []HostCaching { + return []HostCaching{HostCachingNone, HostCachingReadOnly, HostCachingReadWrite} +} + +// HyperVGeneration enumerates the values for hyper v generation. +type HyperVGeneration string + +const ( + // V1 ... + V1 HyperVGeneration = "V1" + // V2 ... + V2 HyperVGeneration = "V2" +) + +// PossibleHyperVGenerationValues returns an array of possible values for the HyperVGeneration const type. +func PossibleHyperVGenerationValues() []HyperVGeneration { + return []HyperVGeneration{V1, V2} +} + // InstanceViewTypes enumerates the values for instance view types. type InstanceViewTypes string @@ -235,6 +484,107 @@ func PossibleProtocolTypesValues() []ProtocolTypes { return []ProtocolTypes{HTTP, HTTPS} } +// ProvisioningState enumerates the values for provisioning state. +type ProvisioningState string + +const ( + // ProvisioningStateCreating ... + ProvisioningStateCreating ProvisioningState = "Creating" + // ProvisioningStateDeleting ... + ProvisioningStateDeleting ProvisioningState = "Deleting" + // ProvisioningStateFailed ... + ProvisioningStateFailed ProvisioningState = "Failed" + // ProvisioningStateMigrating ... + ProvisioningStateMigrating ProvisioningState = "Migrating" + // ProvisioningStateSucceeded ... + ProvisioningStateSucceeded ProvisioningState = "Succeeded" + // ProvisioningStateUpdating ... + ProvisioningStateUpdating ProvisioningState = "Updating" +) + +// PossibleProvisioningStateValues returns an array of possible values for the ProvisioningState const type. 
+func PossibleProvisioningStateValues() []ProvisioningState { + return []ProvisioningState{ProvisioningStateCreating, ProvisioningStateDeleting, ProvisioningStateFailed, ProvisioningStateMigrating, ProvisioningStateSucceeded, ProvisioningStateUpdating} +} + +// ProvisioningState1 enumerates the values for provisioning state 1. +type ProvisioningState1 string + +const ( + // ProvisioningState1Creating ... + ProvisioningState1Creating ProvisioningState1 = "Creating" + // ProvisioningState1Deleting ... + ProvisioningState1Deleting ProvisioningState1 = "Deleting" + // ProvisioningState1Failed ... + ProvisioningState1Failed ProvisioningState1 = "Failed" + // ProvisioningState1Migrating ... + ProvisioningState1Migrating ProvisioningState1 = "Migrating" + // ProvisioningState1Succeeded ... + ProvisioningState1Succeeded ProvisioningState1 = "Succeeded" + // ProvisioningState1Updating ... + ProvisioningState1Updating ProvisioningState1 = "Updating" +) + +// PossibleProvisioningState1Values returns an array of possible values for the ProvisioningState1 const type. +func PossibleProvisioningState1Values() []ProvisioningState1 { + return []ProvisioningState1{ProvisioningState1Creating, ProvisioningState1Deleting, ProvisioningState1Failed, ProvisioningState1Migrating, ProvisioningState1Succeeded, ProvisioningState1Updating} +} + +// ProvisioningState2 enumerates the values for provisioning state 2. +type ProvisioningState2 string + +const ( + // ProvisioningState2Creating ... + ProvisioningState2Creating ProvisioningState2 = "Creating" + // ProvisioningState2Deleting ... + ProvisioningState2Deleting ProvisioningState2 = "Deleting" + // ProvisioningState2Failed ... + ProvisioningState2Failed ProvisioningState2 = "Failed" + // ProvisioningState2Migrating ... + ProvisioningState2Migrating ProvisioningState2 = "Migrating" + // ProvisioningState2Succeeded ... + ProvisioningState2Succeeded ProvisioningState2 = "Succeeded" + // ProvisioningState2Updating ... 
+ ProvisioningState2Updating ProvisioningState2 = "Updating" +) + +// PossibleProvisioningState2Values returns an array of possible values for the ProvisioningState2 const type. +func PossibleProvisioningState2Values() []ProvisioningState2 { + return []ProvisioningState2{ProvisioningState2Creating, ProvisioningState2Deleting, ProvisioningState2Failed, ProvisioningState2Migrating, ProvisioningState2Succeeded, ProvisioningState2Updating} +} + +// ReplicationState enumerates the values for replication state. +type ReplicationState string + +const ( + // ReplicationStateCompleted ... + ReplicationStateCompleted ReplicationState = "Completed" + // ReplicationStateFailed ... + ReplicationStateFailed ReplicationState = "Failed" + // ReplicationStateReplicating ... + ReplicationStateReplicating ReplicationState = "Replicating" + // ReplicationStateUnknown ... + ReplicationStateUnknown ReplicationState = "Unknown" +) + +// PossibleReplicationStateValues returns an array of possible values for the ReplicationState const type. +func PossibleReplicationStateValues() []ReplicationState { + return []ReplicationState{ReplicationStateCompleted, ReplicationStateFailed, ReplicationStateReplicating, ReplicationStateUnknown} +} + +// ReplicationStatusTypes enumerates the values for replication status types. +type ReplicationStatusTypes string + +const ( + // ReplicationStatusTypesReplicationStatus ... + ReplicationStatusTypesReplicationStatus ReplicationStatusTypes = "ReplicationStatus" +) + +// PossibleReplicationStatusTypesValues returns an array of possible values for the ReplicationStatusTypes const type. +func PossibleReplicationStatusTypesValues() []ReplicationStatusTypes { + return []ReplicationStatusTypes{ReplicationStatusTypesReplicationStatus} +} + // ResourceIdentityType enumerates the values for resource identity type. 
type ResourceIdentityType string @@ -254,6 +604,53 @@ func PossibleResourceIdentityTypeValues() []ResourceIdentityType { return []ResourceIdentityType{ResourceIdentityTypeNone, ResourceIdentityTypeSystemAssigned, ResourceIdentityTypeSystemAssignedUserAssigned, ResourceIdentityTypeUserAssigned} } +// ResourceSkuCapacityScaleType enumerates the values for resource sku capacity scale type. +type ResourceSkuCapacityScaleType string + +const ( + // ResourceSkuCapacityScaleTypeAutomatic ... + ResourceSkuCapacityScaleTypeAutomatic ResourceSkuCapacityScaleType = "Automatic" + // ResourceSkuCapacityScaleTypeManual ... + ResourceSkuCapacityScaleTypeManual ResourceSkuCapacityScaleType = "Manual" + // ResourceSkuCapacityScaleTypeNone ... + ResourceSkuCapacityScaleTypeNone ResourceSkuCapacityScaleType = "None" +) + +// PossibleResourceSkuCapacityScaleTypeValues returns an array of possible values for the ResourceSkuCapacityScaleType const type. +func PossibleResourceSkuCapacityScaleTypeValues() []ResourceSkuCapacityScaleType { + return []ResourceSkuCapacityScaleType{ResourceSkuCapacityScaleTypeAutomatic, ResourceSkuCapacityScaleTypeManual, ResourceSkuCapacityScaleTypeNone} +} + +// ResourceSkuRestrictionsReasonCode enumerates the values for resource sku restrictions reason code. +type ResourceSkuRestrictionsReasonCode string + +const ( + // NotAvailableForSubscription ... + NotAvailableForSubscription ResourceSkuRestrictionsReasonCode = "NotAvailableForSubscription" + // QuotaID ... + QuotaID ResourceSkuRestrictionsReasonCode = "QuotaId" +) + +// PossibleResourceSkuRestrictionsReasonCodeValues returns an array of possible values for the ResourceSkuRestrictionsReasonCode const type. +func PossibleResourceSkuRestrictionsReasonCodeValues() []ResourceSkuRestrictionsReasonCode { + return []ResourceSkuRestrictionsReasonCode{NotAvailableForSubscription, QuotaID} +} + +// ResourceSkuRestrictionsType enumerates the values for resource sku restrictions type. 
+type ResourceSkuRestrictionsType string + +const ( + // Location ... + Location ResourceSkuRestrictionsType = "Location" + // Zone ... + Zone ResourceSkuRestrictionsType = "Zone" +) + +// PossibleResourceSkuRestrictionsTypeValues returns an array of possible values for the ResourceSkuRestrictionsType const type. +func PossibleResourceSkuRestrictionsTypeValues() []ResourceSkuRestrictionsType { + return []ResourceSkuRestrictionsType{Location, Zone} +} + // RollingUpgradeActionType enumerates the values for rolling upgrade action type. type RollingUpgradeActionType string @@ -273,19 +670,19 @@ func PossibleRollingUpgradeActionTypeValues() []RollingUpgradeActionType { type RollingUpgradeStatusCode string const ( - // Cancelled ... - Cancelled RollingUpgradeStatusCode = "Cancelled" - // Completed ... - Completed RollingUpgradeStatusCode = "Completed" - // Faulted ... - Faulted RollingUpgradeStatusCode = "Faulted" - // RollingForward ... - RollingForward RollingUpgradeStatusCode = "RollingForward" + // RollingUpgradeStatusCodeCancelled ... + RollingUpgradeStatusCodeCancelled RollingUpgradeStatusCode = "Cancelled" + // RollingUpgradeStatusCodeCompleted ... + RollingUpgradeStatusCodeCompleted RollingUpgradeStatusCode = "Completed" + // RollingUpgradeStatusCodeFaulted ... + RollingUpgradeStatusCodeFaulted RollingUpgradeStatusCode = "Faulted" + // RollingUpgradeStatusCodeRollingForward ... + RollingUpgradeStatusCodeRollingForward RollingUpgradeStatusCode = "RollingForward" ) // PossibleRollingUpgradeStatusCodeValues returns an array of possible values for the RollingUpgradeStatusCode const type. 
func PossibleRollingUpgradeStatusCodeValues() []RollingUpgradeStatusCode { - return []RollingUpgradeStatusCode{Cancelled, Completed, Faulted, RollingForward} + return []RollingUpgradeStatusCode{RollingUpgradeStatusCodeCancelled, RollingUpgradeStatusCodeCompleted, RollingUpgradeStatusCodeFaulted, RollingUpgradeStatusCodeRollingForward} } // SettingNames enumerates the values for setting names. @@ -307,17 +704,17 @@ func PossibleSettingNamesValues() []SettingNames { type SnapshotStorageAccountTypes string const ( - // PremiumLRS ... - PremiumLRS SnapshotStorageAccountTypes = "Premium_LRS" - // StandardLRS ... - StandardLRS SnapshotStorageAccountTypes = "Standard_LRS" - // StandardZRS ... - StandardZRS SnapshotStorageAccountTypes = "Standard_ZRS" + // SnapshotStorageAccountTypesPremiumLRS ... + SnapshotStorageAccountTypesPremiumLRS SnapshotStorageAccountTypes = "Premium_LRS" + // SnapshotStorageAccountTypesStandardLRS ... + SnapshotStorageAccountTypesStandardLRS SnapshotStorageAccountTypes = "Standard_LRS" + // SnapshotStorageAccountTypesStandardZRS ... + SnapshotStorageAccountTypesStandardZRS SnapshotStorageAccountTypes = "Standard_ZRS" ) // PossibleSnapshotStorageAccountTypesValues returns an array of possible values for the SnapshotStorageAccountTypes const type. func PossibleSnapshotStorageAccountTypesValues() []SnapshotStorageAccountTypes { - return []SnapshotStorageAccountTypes{PremiumLRS, StandardLRS, StandardZRS} + return []SnapshotStorageAccountTypes{SnapshotStorageAccountTypesPremiumLRS, SnapshotStorageAccountTypesStandardLRS, SnapshotStorageAccountTypesStandardZRS} } // StatusLevelTypes enumerates the values for status level types. @@ -337,6 +734,21 @@ func PossibleStatusLevelTypesValues() []StatusLevelTypes { return []StatusLevelTypes{Error, Info, Warning} } +// StorageAccountType enumerates the values for storage account type. +type StorageAccountType string + +const ( + // StorageAccountTypeStandardLRS ... 
+ StorageAccountTypeStandardLRS StorageAccountType = "Standard_LRS" + // StorageAccountTypeStandardZRS ... + StorageAccountTypeStandardZRS StorageAccountType = "Standard_ZRS" +) + +// PossibleStorageAccountTypeValues returns an array of possible values for the StorageAccountType const type. +func PossibleStorageAccountTypeValues() []StorageAccountType { + return []StorageAccountType{StorageAccountTypeStandardLRS, StorageAccountTypeStandardZRS} +} + // StorageAccountTypes enumerates the values for storage account types. type StorageAccountTypes string @@ -347,11 +759,13 @@ const ( StorageAccountTypesStandardLRS StorageAccountTypes = "Standard_LRS" // StorageAccountTypesStandardSSDLRS ... StorageAccountTypesStandardSSDLRS StorageAccountTypes = "StandardSSD_LRS" + // StorageAccountTypesUltraSSDLRS ... + StorageAccountTypesUltraSSDLRS StorageAccountTypes = "UltraSSD_LRS" ) // PossibleStorageAccountTypesValues returns an array of possible values for the StorageAccountTypes const type. func PossibleStorageAccountTypesValues() []StorageAccountTypes { - return []StorageAccountTypes{StorageAccountTypesPremiumLRS, StorageAccountTypesStandardLRS, StorageAccountTypesStandardSSDLRS} + return []StorageAccountTypes{StorageAccountTypesPremiumLRS, StorageAccountTypesStandardLRS, StorageAccountTypesStandardSSDLRS, StorageAccountTypesUltraSSDLRS} } // UpgradeMode enumerates the values for upgrade mode. @@ -375,17 +789,17 @@ func PossibleUpgradeModeValues() []UpgradeMode { type UpgradeOperationInvoker string const ( - // Platform ... - Platform UpgradeOperationInvoker = "Platform" - // Unknown ... - Unknown UpgradeOperationInvoker = "Unknown" - // User ... - User UpgradeOperationInvoker = "User" + // UpgradeOperationInvokerPlatform ... + UpgradeOperationInvokerPlatform UpgradeOperationInvoker = "Platform" + // UpgradeOperationInvokerUnknown ... + UpgradeOperationInvokerUnknown UpgradeOperationInvoker = "Unknown" + // UpgradeOperationInvokerUser ... 
+ UpgradeOperationInvokerUser UpgradeOperationInvoker = "User" ) // PossibleUpgradeOperationInvokerValues returns an array of possible values for the UpgradeOperationInvoker const type. func PossibleUpgradeOperationInvokerValues() []UpgradeOperationInvoker { - return []UpgradeOperationInvoker{Platform, Unknown, User} + return []UpgradeOperationInvoker{UpgradeOperationInvokerPlatform, UpgradeOperationInvokerUnknown, UpgradeOperationInvokerUser} } // UpgradeState enumerates the values for upgrade state. @@ -456,448 +870,376 @@ func PossibleVirtualMachineScaleSetSkuScaleTypeValues() []VirtualMachineScaleSet type VirtualMachineSizeTypes string const ( - // BasicA0 ... - BasicA0 VirtualMachineSizeTypes = "Basic_A0" - // BasicA1 ... - BasicA1 VirtualMachineSizeTypes = "Basic_A1" - // BasicA2 ... - BasicA2 VirtualMachineSizeTypes = "Basic_A2" - // BasicA3 ... - BasicA3 VirtualMachineSizeTypes = "Basic_A3" - // BasicA4 ... - BasicA4 VirtualMachineSizeTypes = "Basic_A4" - // StandardA0 ... - StandardA0 VirtualMachineSizeTypes = "Standard_A0" - // StandardA1 ... - StandardA1 VirtualMachineSizeTypes = "Standard_A1" - // StandardA10 ... - StandardA10 VirtualMachineSizeTypes = "Standard_A10" - // StandardA11 ... - StandardA11 VirtualMachineSizeTypes = "Standard_A11" - // StandardA1V2 ... - StandardA1V2 VirtualMachineSizeTypes = "Standard_A1_v2" - // StandardA2 ... - StandardA2 VirtualMachineSizeTypes = "Standard_A2" - // StandardA2mV2 ... - StandardA2mV2 VirtualMachineSizeTypes = "Standard_A2m_v2" - // StandardA2V2 ... - StandardA2V2 VirtualMachineSizeTypes = "Standard_A2_v2" - // StandardA3 ... - StandardA3 VirtualMachineSizeTypes = "Standard_A3" - // StandardA4 ... - StandardA4 VirtualMachineSizeTypes = "Standard_A4" - // StandardA4mV2 ... - StandardA4mV2 VirtualMachineSizeTypes = "Standard_A4m_v2" - // StandardA4V2 ... - StandardA4V2 VirtualMachineSizeTypes = "Standard_A4_v2" - // StandardA5 ... - StandardA5 VirtualMachineSizeTypes = "Standard_A5" - // StandardA6 ... 
- StandardA6 VirtualMachineSizeTypes = "Standard_A6" - // StandardA7 ... - StandardA7 VirtualMachineSizeTypes = "Standard_A7" - // StandardA8 ... - StandardA8 VirtualMachineSizeTypes = "Standard_A8" - // StandardA8mV2 ... - StandardA8mV2 VirtualMachineSizeTypes = "Standard_A8m_v2" - // StandardA8V2 ... - StandardA8V2 VirtualMachineSizeTypes = "Standard_A8_v2" - // StandardA9 ... - StandardA9 VirtualMachineSizeTypes = "Standard_A9" - // StandardB1ms ... - StandardB1ms VirtualMachineSizeTypes = "Standard_B1ms" - // StandardB1s ... - StandardB1s VirtualMachineSizeTypes = "Standard_B1s" - // StandardB2ms ... - StandardB2ms VirtualMachineSizeTypes = "Standard_B2ms" - // StandardB2s ... - StandardB2s VirtualMachineSizeTypes = "Standard_B2s" - // StandardB4ms ... - StandardB4ms VirtualMachineSizeTypes = "Standard_B4ms" - // StandardB8ms ... - StandardB8ms VirtualMachineSizeTypes = "Standard_B8ms" - // StandardD1 ... - StandardD1 VirtualMachineSizeTypes = "Standard_D1" - // StandardD11 ... - StandardD11 VirtualMachineSizeTypes = "Standard_D11" - // StandardD11V2 ... - StandardD11V2 VirtualMachineSizeTypes = "Standard_D11_v2" - // StandardD12 ... - StandardD12 VirtualMachineSizeTypes = "Standard_D12" - // StandardD12V2 ... - StandardD12V2 VirtualMachineSizeTypes = "Standard_D12_v2" - // StandardD13 ... - StandardD13 VirtualMachineSizeTypes = "Standard_D13" - // StandardD13V2 ... - StandardD13V2 VirtualMachineSizeTypes = "Standard_D13_v2" - // StandardD14 ... - StandardD14 VirtualMachineSizeTypes = "Standard_D14" - // StandardD14V2 ... - StandardD14V2 VirtualMachineSizeTypes = "Standard_D14_v2" - // StandardD15V2 ... - StandardD15V2 VirtualMachineSizeTypes = "Standard_D15_v2" - // StandardD16sV3 ... - StandardD16sV3 VirtualMachineSizeTypes = "Standard_D16s_v3" - // StandardD16V3 ... - StandardD16V3 VirtualMachineSizeTypes = "Standard_D16_v3" - // StandardD1V2 ... - StandardD1V2 VirtualMachineSizeTypes = "Standard_D1_v2" - // StandardD2 ... 
- StandardD2 VirtualMachineSizeTypes = "Standard_D2" - // StandardD2sV3 ... - StandardD2sV3 VirtualMachineSizeTypes = "Standard_D2s_v3" - // StandardD2V2 ... - StandardD2V2 VirtualMachineSizeTypes = "Standard_D2_v2" - // StandardD2V3 ... - StandardD2V3 VirtualMachineSizeTypes = "Standard_D2_v3" - // StandardD3 ... - StandardD3 VirtualMachineSizeTypes = "Standard_D3" - // StandardD32sV3 ... - StandardD32sV3 VirtualMachineSizeTypes = "Standard_D32s_v3" - // StandardD32V3 ... - StandardD32V3 VirtualMachineSizeTypes = "Standard_D32_v3" - // StandardD3V2 ... - StandardD3V2 VirtualMachineSizeTypes = "Standard_D3_v2" - // StandardD4 ... - StandardD4 VirtualMachineSizeTypes = "Standard_D4" - // StandardD4sV3 ... - StandardD4sV3 VirtualMachineSizeTypes = "Standard_D4s_v3" - // StandardD4V2 ... - StandardD4V2 VirtualMachineSizeTypes = "Standard_D4_v2" - // StandardD4V3 ... - StandardD4V3 VirtualMachineSizeTypes = "Standard_D4_v3" - // StandardD5V2 ... - StandardD5V2 VirtualMachineSizeTypes = "Standard_D5_v2" - // StandardD64sV3 ... - StandardD64sV3 VirtualMachineSizeTypes = "Standard_D64s_v3" - // StandardD64V3 ... - StandardD64V3 VirtualMachineSizeTypes = "Standard_D64_v3" - // StandardD8sV3 ... - StandardD8sV3 VirtualMachineSizeTypes = "Standard_D8s_v3" - // StandardD8V3 ... - StandardD8V3 VirtualMachineSizeTypes = "Standard_D8_v3" - // StandardDS1 ... - StandardDS1 VirtualMachineSizeTypes = "Standard_DS1" - // StandardDS11 ... - StandardDS11 VirtualMachineSizeTypes = "Standard_DS11" - // StandardDS11V2 ... - StandardDS11V2 VirtualMachineSizeTypes = "Standard_DS11_v2" - // StandardDS12 ... - StandardDS12 VirtualMachineSizeTypes = "Standard_DS12" - // StandardDS12V2 ... - StandardDS12V2 VirtualMachineSizeTypes = "Standard_DS12_v2" - // StandardDS13 ... - StandardDS13 VirtualMachineSizeTypes = "Standard_DS13" - // StandardDS132V2 ... - StandardDS132V2 VirtualMachineSizeTypes = "Standard_DS13-2_v2" - // StandardDS134V2 ... 
- StandardDS134V2 VirtualMachineSizeTypes = "Standard_DS13-4_v2" - // StandardDS13V2 ... - StandardDS13V2 VirtualMachineSizeTypes = "Standard_DS13_v2" - // StandardDS14 ... - StandardDS14 VirtualMachineSizeTypes = "Standard_DS14" - // StandardDS144V2 ... - StandardDS144V2 VirtualMachineSizeTypes = "Standard_DS14-4_v2" - // StandardDS148V2 ... - StandardDS148V2 VirtualMachineSizeTypes = "Standard_DS14-8_v2" - // StandardDS14V2 ... - StandardDS14V2 VirtualMachineSizeTypes = "Standard_DS14_v2" - // StandardDS15V2 ... - StandardDS15V2 VirtualMachineSizeTypes = "Standard_DS15_v2" - // StandardDS1V2 ... - StandardDS1V2 VirtualMachineSizeTypes = "Standard_DS1_v2" - // StandardDS2 ... - StandardDS2 VirtualMachineSizeTypes = "Standard_DS2" - // StandardDS2V2 ... - StandardDS2V2 VirtualMachineSizeTypes = "Standard_DS2_v2" - // StandardDS3 ... - StandardDS3 VirtualMachineSizeTypes = "Standard_DS3" - // StandardDS3V2 ... - StandardDS3V2 VirtualMachineSizeTypes = "Standard_DS3_v2" - // StandardDS4 ... - StandardDS4 VirtualMachineSizeTypes = "Standard_DS4" - // StandardDS4V2 ... - StandardDS4V2 VirtualMachineSizeTypes = "Standard_DS4_v2" - // StandardDS5V2 ... - StandardDS5V2 VirtualMachineSizeTypes = "Standard_DS5_v2" - // StandardE16sV3 ... - StandardE16sV3 VirtualMachineSizeTypes = "Standard_E16s_v3" - // StandardE16V3 ... - StandardE16V3 VirtualMachineSizeTypes = "Standard_E16_v3" - // StandardE2sV3 ... - StandardE2sV3 VirtualMachineSizeTypes = "Standard_E2s_v3" - // StandardE2V3 ... - StandardE2V3 VirtualMachineSizeTypes = "Standard_E2_v3" - // StandardE3216V3 ... - StandardE3216V3 VirtualMachineSizeTypes = "Standard_E32-16_v3" - // StandardE328sV3 ... - StandardE328sV3 VirtualMachineSizeTypes = "Standard_E32-8s_v3" - // StandardE32sV3 ... - StandardE32sV3 VirtualMachineSizeTypes = "Standard_E32s_v3" - // StandardE32V3 ... - StandardE32V3 VirtualMachineSizeTypes = "Standard_E32_v3" - // StandardE4sV3 ... 
- StandardE4sV3 VirtualMachineSizeTypes = "Standard_E4s_v3" - // StandardE4V3 ... - StandardE4V3 VirtualMachineSizeTypes = "Standard_E4_v3" - // StandardE6416sV3 ... - StandardE6416sV3 VirtualMachineSizeTypes = "Standard_E64-16s_v3" - // StandardE6432sV3 ... - StandardE6432sV3 VirtualMachineSizeTypes = "Standard_E64-32s_v3" - // StandardE64sV3 ... - StandardE64sV3 VirtualMachineSizeTypes = "Standard_E64s_v3" - // StandardE64V3 ... - StandardE64V3 VirtualMachineSizeTypes = "Standard_E64_v3" - // StandardE8sV3 ... - StandardE8sV3 VirtualMachineSizeTypes = "Standard_E8s_v3" - // StandardE8V3 ... - StandardE8V3 VirtualMachineSizeTypes = "Standard_E8_v3" - // StandardF1 ... - StandardF1 VirtualMachineSizeTypes = "Standard_F1" - // StandardF16 ... - StandardF16 VirtualMachineSizeTypes = "Standard_F16" - // StandardF16s ... - StandardF16s VirtualMachineSizeTypes = "Standard_F16s" - // StandardF16sV2 ... - StandardF16sV2 VirtualMachineSizeTypes = "Standard_F16s_v2" - // StandardF1s ... - StandardF1s VirtualMachineSizeTypes = "Standard_F1s" - // StandardF2 ... - StandardF2 VirtualMachineSizeTypes = "Standard_F2" - // StandardF2s ... - StandardF2s VirtualMachineSizeTypes = "Standard_F2s" - // StandardF2sV2 ... - StandardF2sV2 VirtualMachineSizeTypes = "Standard_F2s_v2" - // StandardF32sV2 ... - StandardF32sV2 VirtualMachineSizeTypes = "Standard_F32s_v2" - // StandardF4 ... - StandardF4 VirtualMachineSizeTypes = "Standard_F4" - // StandardF4s ... - StandardF4s VirtualMachineSizeTypes = "Standard_F4s" - // StandardF4sV2 ... - StandardF4sV2 VirtualMachineSizeTypes = "Standard_F4s_v2" - // StandardF64sV2 ... - StandardF64sV2 VirtualMachineSizeTypes = "Standard_F64s_v2" - // StandardF72sV2 ... - StandardF72sV2 VirtualMachineSizeTypes = "Standard_F72s_v2" - // StandardF8 ... - StandardF8 VirtualMachineSizeTypes = "Standard_F8" - // StandardF8s ... - StandardF8s VirtualMachineSizeTypes = "Standard_F8s" - // StandardF8sV2 ... 
- StandardF8sV2 VirtualMachineSizeTypes = "Standard_F8s_v2" - // StandardG1 ... - StandardG1 VirtualMachineSizeTypes = "Standard_G1" - // StandardG2 ... - StandardG2 VirtualMachineSizeTypes = "Standard_G2" - // StandardG3 ... - StandardG3 VirtualMachineSizeTypes = "Standard_G3" - // StandardG4 ... - StandardG4 VirtualMachineSizeTypes = "Standard_G4" - // StandardG5 ... - StandardG5 VirtualMachineSizeTypes = "Standard_G5" - // StandardGS1 ... - StandardGS1 VirtualMachineSizeTypes = "Standard_GS1" - // StandardGS2 ... - StandardGS2 VirtualMachineSizeTypes = "Standard_GS2" - // StandardGS3 ... - StandardGS3 VirtualMachineSizeTypes = "Standard_GS3" - // StandardGS4 ... - StandardGS4 VirtualMachineSizeTypes = "Standard_GS4" - // StandardGS44 ... - StandardGS44 VirtualMachineSizeTypes = "Standard_GS4-4" - // StandardGS48 ... - StandardGS48 VirtualMachineSizeTypes = "Standard_GS4-8" - // StandardGS5 ... - StandardGS5 VirtualMachineSizeTypes = "Standard_GS5" - // StandardGS516 ... - StandardGS516 VirtualMachineSizeTypes = "Standard_GS5-16" - // StandardGS58 ... - StandardGS58 VirtualMachineSizeTypes = "Standard_GS5-8" - // StandardH16 ... - StandardH16 VirtualMachineSizeTypes = "Standard_H16" - // StandardH16m ... - StandardH16m VirtualMachineSizeTypes = "Standard_H16m" - // StandardH16mr ... - StandardH16mr VirtualMachineSizeTypes = "Standard_H16mr" - // StandardH16r ... - StandardH16r VirtualMachineSizeTypes = "Standard_H16r" - // StandardH8 ... - StandardH8 VirtualMachineSizeTypes = "Standard_H8" - // StandardH8m ... - StandardH8m VirtualMachineSizeTypes = "Standard_H8m" - // StandardL16s ... - StandardL16s VirtualMachineSizeTypes = "Standard_L16s" - // StandardL32s ... - StandardL32s VirtualMachineSizeTypes = "Standard_L32s" - // StandardL4s ... - StandardL4s VirtualMachineSizeTypes = "Standard_L4s" - // StandardL8s ... - StandardL8s VirtualMachineSizeTypes = "Standard_L8s" - // StandardM12832ms ... 
- StandardM12832ms VirtualMachineSizeTypes = "Standard_M128-32ms" - // StandardM12864ms ... - StandardM12864ms VirtualMachineSizeTypes = "Standard_M128-64ms" - // StandardM128ms ... - StandardM128ms VirtualMachineSizeTypes = "Standard_M128ms" - // StandardM128s ... - StandardM128s VirtualMachineSizeTypes = "Standard_M128s" - // StandardM6416ms ... - StandardM6416ms VirtualMachineSizeTypes = "Standard_M64-16ms" - // StandardM6432ms ... - StandardM6432ms VirtualMachineSizeTypes = "Standard_M64-32ms" - // StandardM64ms ... - StandardM64ms VirtualMachineSizeTypes = "Standard_M64ms" - // StandardM64s ... - StandardM64s VirtualMachineSizeTypes = "Standard_M64s" - // StandardNC12 ... - StandardNC12 VirtualMachineSizeTypes = "Standard_NC12" - // StandardNC12sV2 ... - StandardNC12sV2 VirtualMachineSizeTypes = "Standard_NC12s_v2" - // StandardNC12sV3 ... - StandardNC12sV3 VirtualMachineSizeTypes = "Standard_NC12s_v3" - // StandardNC24 ... - StandardNC24 VirtualMachineSizeTypes = "Standard_NC24" - // StandardNC24r ... - StandardNC24r VirtualMachineSizeTypes = "Standard_NC24r" - // StandardNC24rsV2 ... - StandardNC24rsV2 VirtualMachineSizeTypes = "Standard_NC24rs_v2" - // StandardNC24rsV3 ... - StandardNC24rsV3 VirtualMachineSizeTypes = "Standard_NC24rs_v3" - // StandardNC24sV2 ... - StandardNC24sV2 VirtualMachineSizeTypes = "Standard_NC24s_v2" - // StandardNC24sV3 ... - StandardNC24sV3 VirtualMachineSizeTypes = "Standard_NC24s_v3" - // StandardNC6 ... - StandardNC6 VirtualMachineSizeTypes = "Standard_NC6" - // StandardNC6sV2 ... - StandardNC6sV2 VirtualMachineSizeTypes = "Standard_NC6s_v2" - // StandardNC6sV3 ... - StandardNC6sV3 VirtualMachineSizeTypes = "Standard_NC6s_v3" - // StandardND12s ... - StandardND12s VirtualMachineSizeTypes = "Standard_ND12s" - // StandardND24rs ... - StandardND24rs VirtualMachineSizeTypes = "Standard_ND24rs" - // StandardND24s ... - StandardND24s VirtualMachineSizeTypes = "Standard_ND24s" - // StandardND6s ... 
- StandardND6s VirtualMachineSizeTypes = "Standard_ND6s" - // StandardNV12 ... - StandardNV12 VirtualMachineSizeTypes = "Standard_NV12" - // StandardNV24 ... - StandardNV24 VirtualMachineSizeTypes = "Standard_NV24" - // StandardNV6 ... - StandardNV6 VirtualMachineSizeTypes = "Standard_NV6" + // VirtualMachineSizeTypesBasicA0 ... + VirtualMachineSizeTypesBasicA0 VirtualMachineSizeTypes = "Basic_A0" + // VirtualMachineSizeTypesBasicA1 ... + VirtualMachineSizeTypesBasicA1 VirtualMachineSizeTypes = "Basic_A1" + // VirtualMachineSizeTypesBasicA2 ... + VirtualMachineSizeTypesBasicA2 VirtualMachineSizeTypes = "Basic_A2" + // VirtualMachineSizeTypesBasicA3 ... + VirtualMachineSizeTypesBasicA3 VirtualMachineSizeTypes = "Basic_A3" + // VirtualMachineSizeTypesBasicA4 ... + VirtualMachineSizeTypesBasicA4 VirtualMachineSizeTypes = "Basic_A4" + // VirtualMachineSizeTypesStandardA0 ... + VirtualMachineSizeTypesStandardA0 VirtualMachineSizeTypes = "Standard_A0" + // VirtualMachineSizeTypesStandardA1 ... + VirtualMachineSizeTypesStandardA1 VirtualMachineSizeTypes = "Standard_A1" + // VirtualMachineSizeTypesStandardA10 ... + VirtualMachineSizeTypesStandardA10 VirtualMachineSizeTypes = "Standard_A10" + // VirtualMachineSizeTypesStandardA11 ... + VirtualMachineSizeTypesStandardA11 VirtualMachineSizeTypes = "Standard_A11" + // VirtualMachineSizeTypesStandardA1V2 ... + VirtualMachineSizeTypesStandardA1V2 VirtualMachineSizeTypes = "Standard_A1_v2" + // VirtualMachineSizeTypesStandardA2 ... + VirtualMachineSizeTypesStandardA2 VirtualMachineSizeTypes = "Standard_A2" + // VirtualMachineSizeTypesStandardA2mV2 ... + VirtualMachineSizeTypesStandardA2mV2 VirtualMachineSizeTypes = "Standard_A2m_v2" + // VirtualMachineSizeTypesStandardA2V2 ... + VirtualMachineSizeTypesStandardA2V2 VirtualMachineSizeTypes = "Standard_A2_v2" + // VirtualMachineSizeTypesStandardA3 ... + VirtualMachineSizeTypesStandardA3 VirtualMachineSizeTypes = "Standard_A3" + // VirtualMachineSizeTypesStandardA4 ... 
+ VirtualMachineSizeTypesStandardA4 VirtualMachineSizeTypes = "Standard_A4" + // VirtualMachineSizeTypesStandardA4mV2 ... + VirtualMachineSizeTypesStandardA4mV2 VirtualMachineSizeTypes = "Standard_A4m_v2" + // VirtualMachineSizeTypesStandardA4V2 ... + VirtualMachineSizeTypesStandardA4V2 VirtualMachineSizeTypes = "Standard_A4_v2" + // VirtualMachineSizeTypesStandardA5 ... + VirtualMachineSizeTypesStandardA5 VirtualMachineSizeTypes = "Standard_A5" + // VirtualMachineSizeTypesStandardA6 ... + VirtualMachineSizeTypesStandardA6 VirtualMachineSizeTypes = "Standard_A6" + // VirtualMachineSizeTypesStandardA7 ... + VirtualMachineSizeTypesStandardA7 VirtualMachineSizeTypes = "Standard_A7" + // VirtualMachineSizeTypesStandardA8 ... + VirtualMachineSizeTypesStandardA8 VirtualMachineSizeTypes = "Standard_A8" + // VirtualMachineSizeTypesStandardA8mV2 ... + VirtualMachineSizeTypesStandardA8mV2 VirtualMachineSizeTypes = "Standard_A8m_v2" + // VirtualMachineSizeTypesStandardA8V2 ... + VirtualMachineSizeTypesStandardA8V2 VirtualMachineSizeTypes = "Standard_A8_v2" + // VirtualMachineSizeTypesStandardA9 ... + VirtualMachineSizeTypesStandardA9 VirtualMachineSizeTypes = "Standard_A9" + // VirtualMachineSizeTypesStandardB1ms ... + VirtualMachineSizeTypesStandardB1ms VirtualMachineSizeTypes = "Standard_B1ms" + // VirtualMachineSizeTypesStandardB1s ... + VirtualMachineSizeTypesStandardB1s VirtualMachineSizeTypes = "Standard_B1s" + // VirtualMachineSizeTypesStandardB2ms ... + VirtualMachineSizeTypesStandardB2ms VirtualMachineSizeTypes = "Standard_B2ms" + // VirtualMachineSizeTypesStandardB2s ... + VirtualMachineSizeTypesStandardB2s VirtualMachineSizeTypes = "Standard_B2s" + // VirtualMachineSizeTypesStandardB4ms ... + VirtualMachineSizeTypesStandardB4ms VirtualMachineSizeTypes = "Standard_B4ms" + // VirtualMachineSizeTypesStandardB8ms ... + VirtualMachineSizeTypesStandardB8ms VirtualMachineSizeTypes = "Standard_B8ms" + // VirtualMachineSizeTypesStandardD1 ... 
+ VirtualMachineSizeTypesStandardD1 VirtualMachineSizeTypes = "Standard_D1" + // VirtualMachineSizeTypesStandardD11 ... + VirtualMachineSizeTypesStandardD11 VirtualMachineSizeTypes = "Standard_D11" + // VirtualMachineSizeTypesStandardD11V2 ... + VirtualMachineSizeTypesStandardD11V2 VirtualMachineSizeTypes = "Standard_D11_v2" + // VirtualMachineSizeTypesStandardD12 ... + VirtualMachineSizeTypesStandardD12 VirtualMachineSizeTypes = "Standard_D12" + // VirtualMachineSizeTypesStandardD12V2 ... + VirtualMachineSizeTypesStandardD12V2 VirtualMachineSizeTypes = "Standard_D12_v2" + // VirtualMachineSizeTypesStandardD13 ... + VirtualMachineSizeTypesStandardD13 VirtualMachineSizeTypes = "Standard_D13" + // VirtualMachineSizeTypesStandardD13V2 ... + VirtualMachineSizeTypesStandardD13V2 VirtualMachineSizeTypes = "Standard_D13_v2" + // VirtualMachineSizeTypesStandardD14 ... + VirtualMachineSizeTypesStandardD14 VirtualMachineSizeTypes = "Standard_D14" + // VirtualMachineSizeTypesStandardD14V2 ... + VirtualMachineSizeTypesStandardD14V2 VirtualMachineSizeTypes = "Standard_D14_v2" + // VirtualMachineSizeTypesStandardD15V2 ... + VirtualMachineSizeTypesStandardD15V2 VirtualMachineSizeTypes = "Standard_D15_v2" + // VirtualMachineSizeTypesStandardD16sV3 ... + VirtualMachineSizeTypesStandardD16sV3 VirtualMachineSizeTypes = "Standard_D16s_v3" + // VirtualMachineSizeTypesStandardD16V3 ... + VirtualMachineSizeTypesStandardD16V3 VirtualMachineSizeTypes = "Standard_D16_v3" + // VirtualMachineSizeTypesStandardD1V2 ... + VirtualMachineSizeTypesStandardD1V2 VirtualMachineSizeTypes = "Standard_D1_v2" + // VirtualMachineSizeTypesStandardD2 ... + VirtualMachineSizeTypesStandardD2 VirtualMachineSizeTypes = "Standard_D2" + // VirtualMachineSizeTypesStandardD2sV3 ... + VirtualMachineSizeTypesStandardD2sV3 VirtualMachineSizeTypes = "Standard_D2s_v3" + // VirtualMachineSizeTypesStandardD2V2 ... 
+ VirtualMachineSizeTypesStandardD2V2 VirtualMachineSizeTypes = "Standard_D2_v2" + // VirtualMachineSizeTypesStandardD2V3 ... + VirtualMachineSizeTypesStandardD2V3 VirtualMachineSizeTypes = "Standard_D2_v3" + // VirtualMachineSizeTypesStandardD3 ... + VirtualMachineSizeTypesStandardD3 VirtualMachineSizeTypes = "Standard_D3" + // VirtualMachineSizeTypesStandardD32sV3 ... + VirtualMachineSizeTypesStandardD32sV3 VirtualMachineSizeTypes = "Standard_D32s_v3" + // VirtualMachineSizeTypesStandardD32V3 ... + VirtualMachineSizeTypesStandardD32V3 VirtualMachineSizeTypes = "Standard_D32_v3" + // VirtualMachineSizeTypesStandardD3V2 ... + VirtualMachineSizeTypesStandardD3V2 VirtualMachineSizeTypes = "Standard_D3_v2" + // VirtualMachineSizeTypesStandardD4 ... + VirtualMachineSizeTypesStandardD4 VirtualMachineSizeTypes = "Standard_D4" + // VirtualMachineSizeTypesStandardD4sV3 ... + VirtualMachineSizeTypesStandardD4sV3 VirtualMachineSizeTypes = "Standard_D4s_v3" + // VirtualMachineSizeTypesStandardD4V2 ... + VirtualMachineSizeTypesStandardD4V2 VirtualMachineSizeTypes = "Standard_D4_v2" + // VirtualMachineSizeTypesStandardD4V3 ... + VirtualMachineSizeTypesStandardD4V3 VirtualMachineSizeTypes = "Standard_D4_v3" + // VirtualMachineSizeTypesStandardD5V2 ... + VirtualMachineSizeTypesStandardD5V2 VirtualMachineSizeTypes = "Standard_D5_v2" + // VirtualMachineSizeTypesStandardD64sV3 ... + VirtualMachineSizeTypesStandardD64sV3 VirtualMachineSizeTypes = "Standard_D64s_v3" + // VirtualMachineSizeTypesStandardD64V3 ... + VirtualMachineSizeTypesStandardD64V3 VirtualMachineSizeTypes = "Standard_D64_v3" + // VirtualMachineSizeTypesStandardD8sV3 ... + VirtualMachineSizeTypesStandardD8sV3 VirtualMachineSizeTypes = "Standard_D8s_v3" + // VirtualMachineSizeTypesStandardD8V3 ... + VirtualMachineSizeTypesStandardD8V3 VirtualMachineSizeTypes = "Standard_D8_v3" + // VirtualMachineSizeTypesStandardDS1 ... 
+ VirtualMachineSizeTypesStandardDS1 VirtualMachineSizeTypes = "Standard_DS1" + // VirtualMachineSizeTypesStandardDS11 ... + VirtualMachineSizeTypesStandardDS11 VirtualMachineSizeTypes = "Standard_DS11" + // VirtualMachineSizeTypesStandardDS11V2 ... + VirtualMachineSizeTypesStandardDS11V2 VirtualMachineSizeTypes = "Standard_DS11_v2" + // VirtualMachineSizeTypesStandardDS12 ... + VirtualMachineSizeTypesStandardDS12 VirtualMachineSizeTypes = "Standard_DS12" + // VirtualMachineSizeTypesStandardDS12V2 ... + VirtualMachineSizeTypesStandardDS12V2 VirtualMachineSizeTypes = "Standard_DS12_v2" + // VirtualMachineSizeTypesStandardDS13 ... + VirtualMachineSizeTypesStandardDS13 VirtualMachineSizeTypes = "Standard_DS13" + // VirtualMachineSizeTypesStandardDS132V2 ... + VirtualMachineSizeTypesStandardDS132V2 VirtualMachineSizeTypes = "Standard_DS13-2_v2" + // VirtualMachineSizeTypesStandardDS134V2 ... + VirtualMachineSizeTypesStandardDS134V2 VirtualMachineSizeTypes = "Standard_DS13-4_v2" + // VirtualMachineSizeTypesStandardDS13V2 ... + VirtualMachineSizeTypesStandardDS13V2 VirtualMachineSizeTypes = "Standard_DS13_v2" + // VirtualMachineSizeTypesStandardDS14 ... + VirtualMachineSizeTypesStandardDS14 VirtualMachineSizeTypes = "Standard_DS14" + // VirtualMachineSizeTypesStandardDS144V2 ... + VirtualMachineSizeTypesStandardDS144V2 VirtualMachineSizeTypes = "Standard_DS14-4_v2" + // VirtualMachineSizeTypesStandardDS148V2 ... + VirtualMachineSizeTypesStandardDS148V2 VirtualMachineSizeTypes = "Standard_DS14-8_v2" + // VirtualMachineSizeTypesStandardDS14V2 ... + VirtualMachineSizeTypesStandardDS14V2 VirtualMachineSizeTypes = "Standard_DS14_v2" + // VirtualMachineSizeTypesStandardDS15V2 ... + VirtualMachineSizeTypesStandardDS15V2 VirtualMachineSizeTypes = "Standard_DS15_v2" + // VirtualMachineSizeTypesStandardDS1V2 ... + VirtualMachineSizeTypesStandardDS1V2 VirtualMachineSizeTypes = "Standard_DS1_v2" + // VirtualMachineSizeTypesStandardDS2 ... 
+ VirtualMachineSizeTypesStandardDS2 VirtualMachineSizeTypes = "Standard_DS2" + // VirtualMachineSizeTypesStandardDS2V2 ... + VirtualMachineSizeTypesStandardDS2V2 VirtualMachineSizeTypes = "Standard_DS2_v2" + // VirtualMachineSizeTypesStandardDS3 ... + VirtualMachineSizeTypesStandardDS3 VirtualMachineSizeTypes = "Standard_DS3" + // VirtualMachineSizeTypesStandardDS3V2 ... + VirtualMachineSizeTypesStandardDS3V2 VirtualMachineSizeTypes = "Standard_DS3_v2" + // VirtualMachineSizeTypesStandardDS4 ... + VirtualMachineSizeTypesStandardDS4 VirtualMachineSizeTypes = "Standard_DS4" + // VirtualMachineSizeTypesStandardDS4V2 ... + VirtualMachineSizeTypesStandardDS4V2 VirtualMachineSizeTypes = "Standard_DS4_v2" + // VirtualMachineSizeTypesStandardDS5V2 ... + VirtualMachineSizeTypesStandardDS5V2 VirtualMachineSizeTypes = "Standard_DS5_v2" + // VirtualMachineSizeTypesStandardE16sV3 ... + VirtualMachineSizeTypesStandardE16sV3 VirtualMachineSizeTypes = "Standard_E16s_v3" + // VirtualMachineSizeTypesStandardE16V3 ... + VirtualMachineSizeTypesStandardE16V3 VirtualMachineSizeTypes = "Standard_E16_v3" + // VirtualMachineSizeTypesStandardE2sV3 ... + VirtualMachineSizeTypesStandardE2sV3 VirtualMachineSizeTypes = "Standard_E2s_v3" + // VirtualMachineSizeTypesStandardE2V3 ... + VirtualMachineSizeTypesStandardE2V3 VirtualMachineSizeTypes = "Standard_E2_v3" + // VirtualMachineSizeTypesStandardE3216V3 ... + VirtualMachineSizeTypesStandardE3216V3 VirtualMachineSizeTypes = "Standard_E32-16_v3" + // VirtualMachineSizeTypesStandardE328sV3 ... + VirtualMachineSizeTypesStandardE328sV3 VirtualMachineSizeTypes = "Standard_E32-8s_v3" + // VirtualMachineSizeTypesStandardE32sV3 ... + VirtualMachineSizeTypesStandardE32sV3 VirtualMachineSizeTypes = "Standard_E32s_v3" + // VirtualMachineSizeTypesStandardE32V3 ... + VirtualMachineSizeTypesStandardE32V3 VirtualMachineSizeTypes = "Standard_E32_v3" + // VirtualMachineSizeTypesStandardE4sV3 ... 
+ VirtualMachineSizeTypesStandardE4sV3 VirtualMachineSizeTypes = "Standard_E4s_v3" + // VirtualMachineSizeTypesStandardE4V3 ... + VirtualMachineSizeTypesStandardE4V3 VirtualMachineSizeTypes = "Standard_E4_v3" + // VirtualMachineSizeTypesStandardE6416sV3 ... + VirtualMachineSizeTypesStandardE6416sV3 VirtualMachineSizeTypes = "Standard_E64-16s_v3" + // VirtualMachineSizeTypesStandardE6432sV3 ... + VirtualMachineSizeTypesStandardE6432sV3 VirtualMachineSizeTypes = "Standard_E64-32s_v3" + // VirtualMachineSizeTypesStandardE64sV3 ... + VirtualMachineSizeTypesStandardE64sV3 VirtualMachineSizeTypes = "Standard_E64s_v3" + // VirtualMachineSizeTypesStandardE64V3 ... + VirtualMachineSizeTypesStandardE64V3 VirtualMachineSizeTypes = "Standard_E64_v3" + // VirtualMachineSizeTypesStandardE8sV3 ... + VirtualMachineSizeTypesStandardE8sV3 VirtualMachineSizeTypes = "Standard_E8s_v3" + // VirtualMachineSizeTypesStandardE8V3 ... + VirtualMachineSizeTypesStandardE8V3 VirtualMachineSizeTypes = "Standard_E8_v3" + // VirtualMachineSizeTypesStandardF1 ... + VirtualMachineSizeTypesStandardF1 VirtualMachineSizeTypes = "Standard_F1" + // VirtualMachineSizeTypesStandardF16 ... + VirtualMachineSizeTypesStandardF16 VirtualMachineSizeTypes = "Standard_F16" + // VirtualMachineSizeTypesStandardF16s ... + VirtualMachineSizeTypesStandardF16s VirtualMachineSizeTypes = "Standard_F16s" + // VirtualMachineSizeTypesStandardF16sV2 ... + VirtualMachineSizeTypesStandardF16sV2 VirtualMachineSizeTypes = "Standard_F16s_v2" + // VirtualMachineSizeTypesStandardF1s ... + VirtualMachineSizeTypesStandardF1s VirtualMachineSizeTypes = "Standard_F1s" + // VirtualMachineSizeTypesStandardF2 ... + VirtualMachineSizeTypesStandardF2 VirtualMachineSizeTypes = "Standard_F2" + // VirtualMachineSizeTypesStandardF2s ... + VirtualMachineSizeTypesStandardF2s VirtualMachineSizeTypes = "Standard_F2s" + // VirtualMachineSizeTypesStandardF2sV2 ... 
+ VirtualMachineSizeTypesStandardF2sV2 VirtualMachineSizeTypes = "Standard_F2s_v2" + // VirtualMachineSizeTypesStandardF32sV2 ... + VirtualMachineSizeTypesStandardF32sV2 VirtualMachineSizeTypes = "Standard_F32s_v2" + // VirtualMachineSizeTypesStandardF4 ... + VirtualMachineSizeTypesStandardF4 VirtualMachineSizeTypes = "Standard_F4" + // VirtualMachineSizeTypesStandardF4s ... + VirtualMachineSizeTypesStandardF4s VirtualMachineSizeTypes = "Standard_F4s" + // VirtualMachineSizeTypesStandardF4sV2 ... + VirtualMachineSizeTypesStandardF4sV2 VirtualMachineSizeTypes = "Standard_F4s_v2" + // VirtualMachineSizeTypesStandardF64sV2 ... + VirtualMachineSizeTypesStandardF64sV2 VirtualMachineSizeTypes = "Standard_F64s_v2" + // VirtualMachineSizeTypesStandardF72sV2 ... + VirtualMachineSizeTypesStandardF72sV2 VirtualMachineSizeTypes = "Standard_F72s_v2" + // VirtualMachineSizeTypesStandardF8 ... + VirtualMachineSizeTypesStandardF8 VirtualMachineSizeTypes = "Standard_F8" + // VirtualMachineSizeTypesStandardF8s ... + VirtualMachineSizeTypesStandardF8s VirtualMachineSizeTypes = "Standard_F8s" + // VirtualMachineSizeTypesStandardF8sV2 ... + VirtualMachineSizeTypesStandardF8sV2 VirtualMachineSizeTypes = "Standard_F8s_v2" + // VirtualMachineSizeTypesStandardG1 ... + VirtualMachineSizeTypesStandardG1 VirtualMachineSizeTypes = "Standard_G1" + // VirtualMachineSizeTypesStandardG2 ... + VirtualMachineSizeTypesStandardG2 VirtualMachineSizeTypes = "Standard_G2" + // VirtualMachineSizeTypesStandardG3 ... + VirtualMachineSizeTypesStandardG3 VirtualMachineSizeTypes = "Standard_G3" + // VirtualMachineSizeTypesStandardG4 ... + VirtualMachineSizeTypesStandardG4 VirtualMachineSizeTypes = "Standard_G4" + // VirtualMachineSizeTypesStandardG5 ... + VirtualMachineSizeTypesStandardG5 VirtualMachineSizeTypes = "Standard_G5" + // VirtualMachineSizeTypesStandardGS1 ... + VirtualMachineSizeTypesStandardGS1 VirtualMachineSizeTypes = "Standard_GS1" + // VirtualMachineSizeTypesStandardGS2 ... 
+ VirtualMachineSizeTypesStandardGS2 VirtualMachineSizeTypes = "Standard_GS2" + // VirtualMachineSizeTypesStandardGS3 ... + VirtualMachineSizeTypesStandardGS3 VirtualMachineSizeTypes = "Standard_GS3" + // VirtualMachineSizeTypesStandardGS4 ... + VirtualMachineSizeTypesStandardGS4 VirtualMachineSizeTypes = "Standard_GS4" + // VirtualMachineSizeTypesStandardGS44 ... + VirtualMachineSizeTypesStandardGS44 VirtualMachineSizeTypes = "Standard_GS4-4" + // VirtualMachineSizeTypesStandardGS48 ... + VirtualMachineSizeTypesStandardGS48 VirtualMachineSizeTypes = "Standard_GS4-8" + // VirtualMachineSizeTypesStandardGS5 ... + VirtualMachineSizeTypesStandardGS5 VirtualMachineSizeTypes = "Standard_GS5" + // VirtualMachineSizeTypesStandardGS516 ... + VirtualMachineSizeTypesStandardGS516 VirtualMachineSizeTypes = "Standard_GS5-16" + // VirtualMachineSizeTypesStandardGS58 ... + VirtualMachineSizeTypesStandardGS58 VirtualMachineSizeTypes = "Standard_GS5-8" + // VirtualMachineSizeTypesStandardH16 ... + VirtualMachineSizeTypesStandardH16 VirtualMachineSizeTypes = "Standard_H16" + // VirtualMachineSizeTypesStandardH16m ... + VirtualMachineSizeTypesStandardH16m VirtualMachineSizeTypes = "Standard_H16m" + // VirtualMachineSizeTypesStandardH16mr ... + VirtualMachineSizeTypesStandardH16mr VirtualMachineSizeTypes = "Standard_H16mr" + // VirtualMachineSizeTypesStandardH16r ... + VirtualMachineSizeTypesStandardH16r VirtualMachineSizeTypes = "Standard_H16r" + // VirtualMachineSizeTypesStandardH8 ... + VirtualMachineSizeTypesStandardH8 VirtualMachineSizeTypes = "Standard_H8" + // VirtualMachineSizeTypesStandardH8m ... + VirtualMachineSizeTypesStandardH8m VirtualMachineSizeTypes = "Standard_H8m" + // VirtualMachineSizeTypesStandardL16s ... + VirtualMachineSizeTypesStandardL16s VirtualMachineSizeTypes = "Standard_L16s" + // VirtualMachineSizeTypesStandardL32s ... + VirtualMachineSizeTypesStandardL32s VirtualMachineSizeTypes = "Standard_L32s" + // VirtualMachineSizeTypesStandardL4s ... 
+ VirtualMachineSizeTypesStandardL4s VirtualMachineSizeTypes = "Standard_L4s" + // VirtualMachineSizeTypesStandardL8s ... + VirtualMachineSizeTypesStandardL8s VirtualMachineSizeTypes = "Standard_L8s" + // VirtualMachineSizeTypesStandardM12832ms ... + VirtualMachineSizeTypesStandardM12832ms VirtualMachineSizeTypes = "Standard_M128-32ms" + // VirtualMachineSizeTypesStandardM12864ms ... + VirtualMachineSizeTypesStandardM12864ms VirtualMachineSizeTypes = "Standard_M128-64ms" + // VirtualMachineSizeTypesStandardM128ms ... + VirtualMachineSizeTypesStandardM128ms VirtualMachineSizeTypes = "Standard_M128ms" + // VirtualMachineSizeTypesStandardM128s ... + VirtualMachineSizeTypesStandardM128s VirtualMachineSizeTypes = "Standard_M128s" + // VirtualMachineSizeTypesStandardM6416ms ... + VirtualMachineSizeTypesStandardM6416ms VirtualMachineSizeTypes = "Standard_M64-16ms" + // VirtualMachineSizeTypesStandardM6432ms ... + VirtualMachineSizeTypesStandardM6432ms VirtualMachineSizeTypes = "Standard_M64-32ms" + // VirtualMachineSizeTypesStandardM64ms ... + VirtualMachineSizeTypesStandardM64ms VirtualMachineSizeTypes = "Standard_M64ms" + // VirtualMachineSizeTypesStandardM64s ... + VirtualMachineSizeTypesStandardM64s VirtualMachineSizeTypes = "Standard_M64s" + // VirtualMachineSizeTypesStandardNC12 ... + VirtualMachineSizeTypesStandardNC12 VirtualMachineSizeTypes = "Standard_NC12" + // VirtualMachineSizeTypesStandardNC12sV2 ... + VirtualMachineSizeTypesStandardNC12sV2 VirtualMachineSizeTypes = "Standard_NC12s_v2" + // VirtualMachineSizeTypesStandardNC12sV3 ... + VirtualMachineSizeTypesStandardNC12sV3 VirtualMachineSizeTypes = "Standard_NC12s_v3" + // VirtualMachineSizeTypesStandardNC24 ... + VirtualMachineSizeTypesStandardNC24 VirtualMachineSizeTypes = "Standard_NC24" + // VirtualMachineSizeTypesStandardNC24r ... + VirtualMachineSizeTypesStandardNC24r VirtualMachineSizeTypes = "Standard_NC24r" + // VirtualMachineSizeTypesStandardNC24rsV2 ... 
+ VirtualMachineSizeTypesStandardNC24rsV2 VirtualMachineSizeTypes = "Standard_NC24rs_v2" + // VirtualMachineSizeTypesStandardNC24rsV3 ... + VirtualMachineSizeTypesStandardNC24rsV3 VirtualMachineSizeTypes = "Standard_NC24rs_v3" + // VirtualMachineSizeTypesStandardNC24sV2 ... + VirtualMachineSizeTypesStandardNC24sV2 VirtualMachineSizeTypes = "Standard_NC24s_v2" + // VirtualMachineSizeTypesStandardNC24sV3 ... + VirtualMachineSizeTypesStandardNC24sV3 VirtualMachineSizeTypes = "Standard_NC24s_v3" + // VirtualMachineSizeTypesStandardNC6 ... + VirtualMachineSizeTypesStandardNC6 VirtualMachineSizeTypes = "Standard_NC6" + // VirtualMachineSizeTypesStandardNC6sV2 ... + VirtualMachineSizeTypesStandardNC6sV2 VirtualMachineSizeTypes = "Standard_NC6s_v2" + // VirtualMachineSizeTypesStandardNC6sV3 ... + VirtualMachineSizeTypesStandardNC6sV3 VirtualMachineSizeTypes = "Standard_NC6s_v3" + // VirtualMachineSizeTypesStandardND12s ... + VirtualMachineSizeTypesStandardND12s VirtualMachineSizeTypes = "Standard_ND12s" + // VirtualMachineSizeTypesStandardND24rs ... + VirtualMachineSizeTypesStandardND24rs VirtualMachineSizeTypes = "Standard_ND24rs" + // VirtualMachineSizeTypesStandardND24s ... + VirtualMachineSizeTypesStandardND24s VirtualMachineSizeTypes = "Standard_ND24s" + // VirtualMachineSizeTypesStandardND6s ... + VirtualMachineSizeTypesStandardND6s VirtualMachineSizeTypes = "Standard_ND6s" + // VirtualMachineSizeTypesStandardNV12 ... + VirtualMachineSizeTypesStandardNV12 VirtualMachineSizeTypes = "Standard_NV12" + // VirtualMachineSizeTypesStandardNV24 ... + VirtualMachineSizeTypesStandardNV24 VirtualMachineSizeTypes = "Standard_NV24" + // VirtualMachineSizeTypesStandardNV6 ... + VirtualMachineSizeTypesStandardNV6 VirtualMachineSizeTypes = "Standard_NV6" ) // PossibleVirtualMachineSizeTypesValues returns an array of possible values for the VirtualMachineSizeTypes const type. 
func PossibleVirtualMachineSizeTypesValues() []VirtualMachineSizeTypes { - return []VirtualMachineSizeTypes{BasicA0, BasicA1, BasicA2, BasicA3, BasicA4, StandardA0, StandardA1, StandardA10, StandardA11, StandardA1V2, StandardA2, StandardA2mV2, StandardA2V2, StandardA3, StandardA4, StandardA4mV2, StandardA4V2, StandardA5, StandardA6, StandardA7, StandardA8, StandardA8mV2, StandardA8V2, StandardA9, StandardB1ms, StandardB1s, StandardB2ms, StandardB2s, StandardB4ms, StandardB8ms, StandardD1, StandardD11, StandardD11V2, StandardD12, StandardD12V2, StandardD13, StandardD13V2, StandardD14, StandardD14V2, StandardD15V2, StandardD16sV3, StandardD16V3, StandardD1V2, StandardD2, StandardD2sV3, StandardD2V2, StandardD2V3, StandardD3, StandardD32sV3, StandardD32V3, StandardD3V2, StandardD4, StandardD4sV3, StandardD4V2, StandardD4V3, StandardD5V2, StandardD64sV3, StandardD64V3, StandardD8sV3, StandardD8V3, StandardDS1, StandardDS11, StandardDS11V2, StandardDS12, StandardDS12V2, StandardDS13, StandardDS132V2, StandardDS134V2, StandardDS13V2, StandardDS14, StandardDS144V2, StandardDS148V2, StandardDS14V2, StandardDS15V2, StandardDS1V2, StandardDS2, StandardDS2V2, StandardDS3, StandardDS3V2, StandardDS4, StandardDS4V2, StandardDS5V2, StandardE16sV3, StandardE16V3, StandardE2sV3, StandardE2V3, StandardE3216V3, StandardE328sV3, StandardE32sV3, StandardE32V3, StandardE4sV3, StandardE4V3, StandardE6416sV3, StandardE6432sV3, StandardE64sV3, StandardE64V3, StandardE8sV3, StandardE8V3, StandardF1, StandardF16, StandardF16s, StandardF16sV2, StandardF1s, StandardF2, StandardF2s, StandardF2sV2, StandardF32sV2, StandardF4, StandardF4s, StandardF4sV2, StandardF64sV2, StandardF72sV2, StandardF8, StandardF8s, StandardF8sV2, StandardG1, StandardG2, StandardG3, StandardG4, StandardG5, StandardGS1, StandardGS2, StandardGS3, StandardGS4, StandardGS44, StandardGS48, StandardGS5, StandardGS516, StandardGS58, StandardH16, StandardH16m, StandardH16mr, StandardH16r, StandardH8, StandardH8m, 
StandardL16s, StandardL32s, StandardL4s, StandardL8s, StandardM12832ms, StandardM12864ms, StandardM128ms, StandardM128s, StandardM6416ms, StandardM6432ms, StandardM64ms, StandardM64s, StandardNC12, StandardNC12sV2, StandardNC12sV3, StandardNC24, StandardNC24r, StandardNC24rsV2, StandardNC24rsV3, StandardNC24sV2, StandardNC24sV3, StandardNC6, StandardNC6sV2, StandardNC6sV3, StandardND12s, StandardND24rs, StandardND24s, StandardND6s, StandardNV12, StandardNV24, StandardNV6} + return []VirtualMachineSizeTypes{VirtualMachineSizeTypesBasicA0, VirtualMachineSizeTypesBasicA1, VirtualMachineSizeTypesBasicA2, VirtualMachineSizeTypesBasicA3, VirtualMachineSizeTypesBasicA4, VirtualMachineSizeTypesStandardA0, VirtualMachineSizeTypesStandardA1, VirtualMachineSizeTypesStandardA10, VirtualMachineSizeTypesStandardA11, VirtualMachineSizeTypesStandardA1V2, VirtualMachineSizeTypesStandardA2, VirtualMachineSizeTypesStandardA2mV2, VirtualMachineSizeTypesStandardA2V2, VirtualMachineSizeTypesStandardA3, VirtualMachineSizeTypesStandardA4, VirtualMachineSizeTypesStandardA4mV2, VirtualMachineSizeTypesStandardA4V2, VirtualMachineSizeTypesStandardA5, VirtualMachineSizeTypesStandardA6, VirtualMachineSizeTypesStandardA7, VirtualMachineSizeTypesStandardA8, VirtualMachineSizeTypesStandardA8mV2, VirtualMachineSizeTypesStandardA8V2, VirtualMachineSizeTypesStandardA9, VirtualMachineSizeTypesStandardB1ms, VirtualMachineSizeTypesStandardB1s, VirtualMachineSizeTypesStandardB2ms, VirtualMachineSizeTypesStandardB2s, VirtualMachineSizeTypesStandardB4ms, VirtualMachineSizeTypesStandardB8ms, VirtualMachineSizeTypesStandardD1, VirtualMachineSizeTypesStandardD11, VirtualMachineSizeTypesStandardD11V2, VirtualMachineSizeTypesStandardD12, VirtualMachineSizeTypesStandardD12V2, VirtualMachineSizeTypesStandardD13, VirtualMachineSizeTypesStandardD13V2, VirtualMachineSizeTypesStandardD14, VirtualMachineSizeTypesStandardD14V2, VirtualMachineSizeTypesStandardD15V2, VirtualMachineSizeTypesStandardD16sV3, 
VirtualMachineSizeTypesStandardD16V3, VirtualMachineSizeTypesStandardD1V2, VirtualMachineSizeTypesStandardD2, VirtualMachineSizeTypesStandardD2sV3, VirtualMachineSizeTypesStandardD2V2, VirtualMachineSizeTypesStandardD2V3, VirtualMachineSizeTypesStandardD3, VirtualMachineSizeTypesStandardD32sV3, VirtualMachineSizeTypesStandardD32V3, VirtualMachineSizeTypesStandardD3V2, VirtualMachineSizeTypesStandardD4, VirtualMachineSizeTypesStandardD4sV3, VirtualMachineSizeTypesStandardD4V2, VirtualMachineSizeTypesStandardD4V3, VirtualMachineSizeTypesStandardD5V2, VirtualMachineSizeTypesStandardD64sV3, VirtualMachineSizeTypesStandardD64V3, VirtualMachineSizeTypesStandardD8sV3, VirtualMachineSizeTypesStandardD8V3, VirtualMachineSizeTypesStandardDS1, VirtualMachineSizeTypesStandardDS11, VirtualMachineSizeTypesStandardDS11V2, VirtualMachineSizeTypesStandardDS12, VirtualMachineSizeTypesStandardDS12V2, VirtualMachineSizeTypesStandardDS13, VirtualMachineSizeTypesStandardDS132V2, VirtualMachineSizeTypesStandardDS134V2, VirtualMachineSizeTypesStandardDS13V2, VirtualMachineSizeTypesStandardDS14, VirtualMachineSizeTypesStandardDS144V2, VirtualMachineSizeTypesStandardDS148V2, VirtualMachineSizeTypesStandardDS14V2, VirtualMachineSizeTypesStandardDS15V2, VirtualMachineSizeTypesStandardDS1V2, VirtualMachineSizeTypesStandardDS2, VirtualMachineSizeTypesStandardDS2V2, VirtualMachineSizeTypesStandardDS3, VirtualMachineSizeTypesStandardDS3V2, VirtualMachineSizeTypesStandardDS4, VirtualMachineSizeTypesStandardDS4V2, VirtualMachineSizeTypesStandardDS5V2, VirtualMachineSizeTypesStandardE16sV3, VirtualMachineSizeTypesStandardE16V3, VirtualMachineSizeTypesStandardE2sV3, VirtualMachineSizeTypesStandardE2V3, VirtualMachineSizeTypesStandardE3216V3, VirtualMachineSizeTypesStandardE328sV3, VirtualMachineSizeTypesStandardE32sV3, VirtualMachineSizeTypesStandardE32V3, VirtualMachineSizeTypesStandardE4sV3, VirtualMachineSizeTypesStandardE4V3, VirtualMachineSizeTypesStandardE6416sV3, 
VirtualMachineSizeTypesStandardE6432sV3, VirtualMachineSizeTypesStandardE64sV3, VirtualMachineSizeTypesStandardE64V3, VirtualMachineSizeTypesStandardE8sV3, VirtualMachineSizeTypesStandardE8V3, VirtualMachineSizeTypesStandardF1, VirtualMachineSizeTypesStandardF16, VirtualMachineSizeTypesStandardF16s, VirtualMachineSizeTypesStandardF16sV2, VirtualMachineSizeTypesStandardF1s, VirtualMachineSizeTypesStandardF2, VirtualMachineSizeTypesStandardF2s, VirtualMachineSizeTypesStandardF2sV2, VirtualMachineSizeTypesStandardF32sV2, VirtualMachineSizeTypesStandardF4, VirtualMachineSizeTypesStandardF4s, VirtualMachineSizeTypesStandardF4sV2, VirtualMachineSizeTypesStandardF64sV2, VirtualMachineSizeTypesStandardF72sV2, VirtualMachineSizeTypesStandardF8, VirtualMachineSizeTypesStandardF8s, VirtualMachineSizeTypesStandardF8sV2, VirtualMachineSizeTypesStandardG1, VirtualMachineSizeTypesStandardG2, VirtualMachineSizeTypesStandardG3, VirtualMachineSizeTypesStandardG4, VirtualMachineSizeTypesStandardG5, VirtualMachineSizeTypesStandardGS1, VirtualMachineSizeTypesStandardGS2, VirtualMachineSizeTypesStandardGS3, VirtualMachineSizeTypesStandardGS4, VirtualMachineSizeTypesStandardGS44, VirtualMachineSizeTypesStandardGS48, VirtualMachineSizeTypesStandardGS5, VirtualMachineSizeTypesStandardGS516, VirtualMachineSizeTypesStandardGS58, VirtualMachineSizeTypesStandardH16, VirtualMachineSizeTypesStandardH16m, VirtualMachineSizeTypesStandardH16mr, VirtualMachineSizeTypesStandardH16r, VirtualMachineSizeTypesStandardH8, VirtualMachineSizeTypesStandardH8m, VirtualMachineSizeTypesStandardL16s, VirtualMachineSizeTypesStandardL32s, VirtualMachineSizeTypesStandardL4s, VirtualMachineSizeTypesStandardL8s, VirtualMachineSizeTypesStandardM12832ms, VirtualMachineSizeTypesStandardM12864ms, VirtualMachineSizeTypesStandardM128ms, VirtualMachineSizeTypesStandardM128s, VirtualMachineSizeTypesStandardM6416ms, VirtualMachineSizeTypesStandardM6432ms, VirtualMachineSizeTypesStandardM64ms, 
VirtualMachineSizeTypesStandardM64s, VirtualMachineSizeTypesStandardNC12, VirtualMachineSizeTypesStandardNC12sV2, VirtualMachineSizeTypesStandardNC12sV3, VirtualMachineSizeTypesStandardNC24, VirtualMachineSizeTypesStandardNC24r, VirtualMachineSizeTypesStandardNC24rsV2, VirtualMachineSizeTypesStandardNC24rsV3, VirtualMachineSizeTypesStandardNC24sV2, VirtualMachineSizeTypesStandardNC24sV3, VirtualMachineSizeTypesStandardNC6, VirtualMachineSizeTypesStandardNC6sV2, VirtualMachineSizeTypesStandardNC6sV3, VirtualMachineSizeTypesStandardND12s, VirtualMachineSizeTypesStandardND24rs, VirtualMachineSizeTypesStandardND24s, VirtualMachineSizeTypesStandardND6s, VirtualMachineSizeTypesStandardNV12, VirtualMachineSizeTypesStandardNV24, VirtualMachineSizeTypesStandardNV6} } // AccessURI a disk access SAS uri. type AccessURI struct { autorest.Response `json:"-"` - // AccessURIOutput - Operation output data (raw JSON) - *AccessURIOutput `json:"properties,omitempty"` -} - -// MarshalJSON is the custom marshaler for AccessURI. -func (au AccessURI) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if au.AccessURIOutput != nil { - objectMap["properties"] = au.AccessURIOutput - } - return json.Marshal(objectMap) + // AccessSAS - A SAS uri for accessing a disk. + AccessSAS *string `json:"accessSAS,omitempty"` } -// UnmarshalJSON is the custom unmarshaler for AccessURI struct. -func (au *AccessURI) UnmarshalJSON(body []byte) error { - var m map[string]*json.RawMessage - err := json.Unmarshal(body, &m) - if err != nil { - return err - } - for k, v := range m { - switch k { - case "properties": - if v != nil { - var accessURIOutput AccessURIOutput - err = json.Unmarshal(*v, &accessURIOutput) - if err != nil { - return err - } - au.AccessURIOutput = &accessURIOutput - } - } - } - - return nil +// AdditionalCapabilities enables or disables a capability on the virtual machine or virtual machine scale set. 
+type AdditionalCapabilities struct { + // UltraSSDEnabled - The flag that enables or disables a capability to have one or more managed data disks with UltraSSD_LRS storage account type on the VM or VMSS. Managed disks with storage account type UltraSSD_LRS can be added to a virtual machine or virtual machine scale set only if this property is enabled. + UltraSSDEnabled *bool `json:"ultraSSDEnabled,omitempty"` } -// AccessURIOutput azure properties, including output. -type AccessURIOutput struct { - // AccessURIRaw - Operation output data (raw JSON) - *AccessURIRaw `json:"output,omitempty"` +// AdditionalUnattendContent specifies additional XML formatted information that can be included in the +// Unattend.xml file, which is used by Windows Setup. Contents are defined by setting name, component name, and the +// pass in which the content is applied. +type AdditionalUnattendContent struct { + // PassName - The pass name. Currently, the only allowable value is OobeSystem. Possible values include: 'OobeSystem' + PassName PassNames `json:"passName,omitempty"` + // ComponentName - The component name. Currently, the only allowable value is Microsoft-Windows-Shell-Setup. Possible values include: 'MicrosoftWindowsShellSetup' + ComponentName ComponentNames `json:"componentName,omitempty"` + // SettingName - Specifies the name of the setting to which the content applies. Possible values are: FirstLogonCommands and AutoLogon. Possible values include: 'AutoLogon', 'FirstLogonCommands' + SettingName SettingNames `json:"settingName,omitempty"` + // Content - Specifies the XML formatted content that is added to the unattend.xml file for the specified path and component. The XML must be less than 4KB and must include the root element for the setting or feature that is being inserted. + Content *string `json:"content,omitempty"` } -// MarshalJSON is the custom marshaler for AccessURIOutput. 
-func (auo AccessURIOutput) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if auo.AccessURIRaw != nil { - objectMap["output"] = auo.AccessURIRaw - } - return json.Marshal(objectMap) -} - -// UnmarshalJSON is the custom unmarshaler for AccessURIOutput struct. -func (auo *AccessURIOutput) UnmarshalJSON(body []byte) error { - var m map[string]*json.RawMessage - err := json.Unmarshal(body, &m) - if err != nil { - return err - } - for k, v := range m { - switch k { - case "output": - if v != nil { - var accessURIRaw AccessURIRaw - err = json.Unmarshal(*v, &accessURIRaw) - if err != nil { - return err - } - auo.AccessURIRaw = &accessURIRaw - } - } - } - - return nil -} - -// AccessURIRaw a disk access SAS uri. -type AccessURIRaw struct { - // AccessSAS - A SAS uri for accessing a disk. - AccessSAS *string `json:"accessSAS,omitempty"` -} - -// AdditionalUnattendContent specifies additional XML formatted information that can be included in the -// Unattend.xml file, which is used by Windows Setup. Contents are defined by setting name, component name, and the -// pass in which the content is applied. -type AdditionalUnattendContent struct { - // PassName - The pass name. Currently, the only allowable value is OobeSystem. Possible values include: 'OobeSystem' - PassName PassNames `json:"passName,omitempty"` - // ComponentName - The component name. Currently, the only allowable value is Microsoft-Windows-Shell-Setup. Possible values include: 'MicrosoftWindowsShellSetup' - ComponentName ComponentNames `json:"componentName,omitempty"` - // SettingName - Specifies the name of the setting to which the content applies. Possible values are: FirstLogonCommands and AutoLogon. Possible values include: 'AutoLogon', 'FirstLogonCommands' - SettingName SettingNames `json:"settingName,omitempty"` - // Content - Specifies the XML formatted content that is added to the unattend.xml file for the specified path and component. 
The XML must be less than 4KB and must include the root element for the setting or feature that is being inserted. - Content *string `json:"content,omitempty"` -} - -// APIEntityReference the API entity reference. -type APIEntityReference struct { - // ID - The ARM resource id in the form of /subscriptions/{SubcriptionId}/resourceGroups/{ResourceGroupName}/... - ID *string `json:"id,omitempty"` +// APIEntityReference the API entity reference. +type APIEntityReference struct { + // ID - The ARM resource id in the form of /subscriptions/{SubscriptionId}/resourceGroups/{ResourceGroupName}/... + ID *string `json:"id,omitempty"` } // APIError api error. @@ -924,24 +1266,32 @@ type APIErrorBase struct { Message *string `json:"message,omitempty"` } -// AutoOSUpgradePolicy the configuration parameters used for performing automatic OS upgrade. -type AutoOSUpgradePolicy struct { - // DisableAutoRollback - Whether OS image rollback feature should be disabled. Default value is false. - DisableAutoRollback *bool `json:"disableAutoRollback,omitempty"` +// AutomaticOSUpgradePolicy the configuration parameters used for performing automatic OS upgrade. +type AutomaticOSUpgradePolicy struct { + // EnableAutomaticOSUpgrade - Whether OS upgrades should automatically be applied to scale set instances in a rolling fashion when a newer version of the image becomes available. Default value is false. + EnableAutomaticOSUpgrade *bool `json:"enableAutomaticOSUpgrade,omitempty"` + // DisableAutomaticRollback - Whether OS image rollback feature should be disabled. Default value is false. + DisableAutomaticRollback *bool `json:"disableAutomaticRollback,omitempty"` +} + +// AutomaticOSUpgradeProperties describes automatic OS upgrade properties on the image. +type AutomaticOSUpgradeProperties struct { + // AutomaticOSUpgradeSupported - Specifies whether automatic OS upgrade is supported on the image. 
+ AutomaticOSUpgradeSupported *bool `json:"automaticOSUpgradeSupported,omitempty"` } // AvailabilitySet specifies information about the availability set that the virtual machine should be assigned to. // Virtual machines specified in the same availability set are allocated to different nodes to maximize // availability. For more information about availability sets, see [Manage the availability of virtual // machines](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-windows-manage-availability?toc=%2fazure%2fvirtual-machines%2fwindows%2ftoc.json). -//

For more information on Azure planned maintainance, see [Planned maintenance for virtual machines in +//

For more information on Azure planned maintenance, see [Planned maintenance for virtual machines in // Azure](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-windows-planned-maintenance?toc=%2fazure%2fvirtual-machines%2fwindows%2ftoc.json) //

Currently, a VM can only be added to availability set at creation time. An existing VM cannot be added // to an availability set. type AvailabilitySet struct { autorest.Response `json:"-"` *AvailabilitySetProperties `json:"properties,omitempty"` - // Sku - Sku of the availability set + // Sku - Sku of the availability set, only name is required to be set. See AvailabilitySetSkuTypes for possible set of values. Use 'Aligned' for virtual machines with managed disks and 'Classic' for virtual machines with unmanaged disks. Default value is 'Classic'. Sku *Sku `json:"sku,omitempty"` // ID - Resource Id ID *string `json:"id,omitempty"` @@ -1242,9 +1592,8 @@ func (asu *AvailabilitySetUpdate) UnmarshalJSON(body []byte) error { } // BootDiagnostics boot Diagnostics is a debugging feature which allows you to view Console Output and Screenshot -// to diagnose VM status.

For Linux Virtual Machines, you can easily view the output of your console log. -//

For both Windows and Linux virtual machines, Azure also enables you to see a screenshot of the VM from -// the hypervisor. +// to diagnose VM status.

You can easily view the output of your console log.

Azure also enables +// you to see a screenshot of the VM from the hypervisor. type BootDiagnostics struct { // Enabled - Whether boot diagnostics should be enabled on the Virtual Machine. Enabled *bool `json:"enabled,omitempty"` @@ -1258,65 +1607,19 @@ type BootDiagnosticsInstanceView struct { ConsoleScreenshotBlobURI *string `json:"consoleScreenshotBlobUri,omitempty"` // SerialConsoleLogBlobURI - The Linux serial console log blob Uri. SerialConsoleLogBlobURI *string `json:"serialConsoleLogBlobUri,omitempty"` + // Status - The boot diagnostics status information for the VM.

NOTE: It will be set only if there are errors encountered in enabling boot diagnostics. + Status *InstanceViewStatus `json:"status,omitempty"` } -// CreationData data used when creating a disk. -type CreationData struct { - // CreateOption - This enumerates the possible sources of a disk's creation. Possible values include: 'Empty', 'Attach', 'FromImage', 'Import', 'Copy', 'Restore' - CreateOption DiskCreateOption `json:"createOption,omitempty"` - // StorageAccountID - If createOption is Import, the Azure Resource Manager identifier of the storage account containing the blob to import as a disk. Required only if the blob is in a different subscription - StorageAccountID *string `json:"storageAccountId,omitempty"` - // ImageReference - Disk source information. - ImageReference *ImageDiskReference `json:"imageReference,omitempty"` - // SourceURI - If createOption is Import, this is the URI of a blob to be imported into a managed disk. - SourceURI *string `json:"sourceUri,omitempty"` - // SourceResourceID - If createOption is Copy, this is the ARM id of the source snapshot or disk. - SourceResourceID *string `json:"sourceResourceId,omitempty"` -} - -// DataDisk describes a data disk. -type DataDisk struct { - // Lun - Specifies the logical unit number of the data disk. This value is used to identify data disks within the VM and therefore must be unique for each data disk attached to a VM. - Lun *int32 `json:"lun,omitempty"` - // Name - The disk name. - Name *string `json:"name,omitempty"` - // Vhd - The virtual hard disk. - Vhd *VirtualHardDisk `json:"vhd,omitempty"` - // Image - The source user image virtual hard disk. The virtual hard disk will be copied before being attached to the virtual machine. If SourceImage is provided, the destination virtual hard drive must not exist. - Image *VirtualHardDisk `json:"image,omitempty"` - // Caching - Specifies the caching requirements.

Possible values are:

**None**

**ReadOnly**

**ReadWrite**

Default: **None for Standard storage. ReadOnly for Premium storage**. Possible values include: 'CachingTypesNone', 'CachingTypesReadOnly', 'CachingTypesReadWrite' - Caching CachingTypes `json:"caching,omitempty"` - // WriteAcceleratorEnabled - Specifies whether writeAccelerator should be enabled or disabled on the disk. - WriteAcceleratorEnabled *bool `json:"writeAcceleratorEnabled,omitempty"` - // CreateOption - Specifies how the virtual machine should be created.

Possible values are:

**Attach** \u2013 This value is used when you are using a specialized disk to create the virtual machine.

**FromImage** \u2013 This value is used when you are using an image to create the virtual machine. If you are using a platform image, you also use the imageReference element described above. If you are using a marketplace image, you also use the plan element previously described. Possible values include: 'DiskCreateOptionTypesFromImage', 'DiskCreateOptionTypesEmpty', 'DiskCreateOptionTypesAttach' - CreateOption DiskCreateOptionTypes `json:"createOption,omitempty"` - // DiskSizeGB - Specifies the size of an empty data disk in gigabytes. This element can be used to overwrite the name of the disk in a virtual machine image.

This value cannot be larger than 1023 GB - DiskSizeGB *int32 `json:"diskSizeGB,omitempty"` - // ManagedDisk - The managed disk parameters. - ManagedDisk *ManagedDiskParameters `json:"managedDisk,omitempty"` -} - -// DataDiskImage contains the data disk images information. -type DataDiskImage struct { - // Lun - Specifies the logical unit number of the data disk. This value is used to identify data disks within the VM and therefore must be unique for each data disk attached to a VM. - Lun *int32 `json:"lun,omitempty"` -} - -// DiagnosticsProfile specifies the boot diagnostic settings state.

Minimum api-version: 2015-06-15. -type DiagnosticsProfile struct { - // BootDiagnostics - Boot Diagnostics is a debugging feature which allows you to view Console Output and Screenshot to diagnose VM status.

For Linux Virtual Machines, you can easily view the output of your console log.

For both Windows and Linux virtual machines, Azure also enables you to see a screenshot of the VM from the hypervisor. - BootDiagnostics *BootDiagnostics `json:"bootDiagnostics,omitempty"` +// CloudError an error response from the Gallery service. +type CloudError struct { + Error *APIError `json:"error,omitempty"` } -// Disk disk resource. -type Disk struct { - autorest.Response `json:"-"` - // ManagedBy - A relative URI containing the ID of the VM that has the disk attached. - ManagedBy *string `json:"managedBy,omitempty"` - Sku *DiskSku `json:"sku,omitempty"` - // Zones - The Logical zone list for Disk. - Zones *[]string `json:"zones,omitempty"` - *DiskProperties `json:"properties,omitempty"` +// ContainerService container service. +type ContainerService struct { + autorest.Response `json:"-"` + *ContainerServiceProperties `json:"properties,omitempty"` // ID - Resource Id ID *string `json:"id,omitempty"` // Name - Resource name @@ -1329,41 +1632,32 @@ type Disk struct { Tags map[string]*string `json:"tags"` } -// MarshalJSON is the custom marshaler for Disk. -func (d Disk) MarshalJSON() ([]byte, error) { +// MarshalJSON is the custom marshaler for ContainerService. 
+func (cs ContainerService) MarshalJSON() ([]byte, error) { objectMap := make(map[string]interface{}) - if d.ManagedBy != nil { - objectMap["managedBy"] = d.ManagedBy - } - if d.Sku != nil { - objectMap["sku"] = d.Sku - } - if d.Zones != nil { - objectMap["zones"] = d.Zones + if cs.ContainerServiceProperties != nil { + objectMap["properties"] = cs.ContainerServiceProperties } - if d.DiskProperties != nil { - objectMap["properties"] = d.DiskProperties - } - if d.ID != nil { - objectMap["id"] = d.ID + if cs.ID != nil { + objectMap["id"] = cs.ID } - if d.Name != nil { - objectMap["name"] = d.Name + if cs.Name != nil { + objectMap["name"] = cs.Name } - if d.Type != nil { - objectMap["type"] = d.Type + if cs.Type != nil { + objectMap["type"] = cs.Type } - if d.Location != nil { - objectMap["location"] = d.Location + if cs.Location != nil { + objectMap["location"] = cs.Location } - if d.Tags != nil { - objectMap["tags"] = d.Tags + if cs.Tags != nil { + objectMap["tags"] = cs.Tags } return json.Marshal(objectMap) } -// UnmarshalJSON is the custom unmarshaler for Disk struct. -func (d *Disk) UnmarshalJSON(body []byte) error { +// UnmarshalJSON is the custom unmarshaler for ContainerService struct. 
+func (cs *ContainerService) UnmarshalJSON(body []byte) error { var m map[string]*json.RawMessage err := json.Unmarshal(body, &m) if err != nil { @@ -1371,41 +1665,14 @@ func (d *Disk) UnmarshalJSON(body []byte) error { } for k, v := range m { switch k { - case "managedBy": - if v != nil { - var managedBy string - err = json.Unmarshal(*v, &managedBy) - if err != nil { - return err - } - d.ManagedBy = &managedBy - } - case "sku": - if v != nil { - var sku DiskSku - err = json.Unmarshal(*v, &sku) - if err != nil { - return err - } - d.Sku = &sku - } - case "zones": - if v != nil { - var zones []string - err = json.Unmarshal(*v, &zones) - if err != nil { - return err - } - d.Zones = &zones - } case "properties": if v != nil { - var diskProperties DiskProperties - err = json.Unmarshal(*v, &diskProperties) + var containerServiceProperties ContainerServiceProperties + err = json.Unmarshal(*v, &containerServiceProperties) if err != nil { return err } - d.DiskProperties = &diskProperties + cs.ContainerServiceProperties = &containerServiceProperties } case "id": if v != nil { @@ -1414,7 +1681,7 @@ func (d *Disk) UnmarshalJSON(body []byte) error { if err != nil { return err } - d.ID = &ID + cs.ID = &ID } case "name": if v != nil { @@ -1423,7 +1690,7 @@ func (d *Disk) UnmarshalJSON(body []byte) error { if err != nil { return err } - d.Name = &name + cs.Name = &name } case "type": if v != nil { @@ -1432,7 +1699,7 @@ func (d *Disk) UnmarshalJSON(body []byte) error { if err != nil { return err } - d.Type = &typeVar + cs.Type = &typeVar } case "location": if v != nil { @@ -1441,7 +1708,7 @@ func (d *Disk) UnmarshalJSON(body []byte) error { if err != nil { return err } - d.Location = &location + cs.Location = &location } case "tags": if v != nil { @@ -1450,7 +1717,7 @@ func (d *Disk) UnmarshalJSON(body []byte) error { if err != nil { return err } - d.Tags = tags + cs.Tags = tags } } } @@ -1458,44 +1725,58 @@ func (d *Disk) UnmarshalJSON(body []byte) error { return nil } -// 
DiskEncryptionSettings describes a Encryption Settings for a Disk -type DiskEncryptionSettings struct { - // DiskEncryptionKey - Specifies the location of the disk encryption key, which is a Key Vault Secret. - DiskEncryptionKey *KeyVaultSecretReference `json:"diskEncryptionKey,omitempty"` - // KeyEncryptionKey - Specifies the location of the key encryption key in Key Vault. - KeyEncryptionKey *KeyVaultKeyReference `json:"keyEncryptionKey,omitempty"` - // Enabled - Specifies whether disk encryption should be enabled on the virtual machine. - Enabled *bool `json:"enabled,omitempty"` +// ContainerServiceAgentPoolProfile profile for the container service agent pool. +type ContainerServiceAgentPoolProfile struct { + // Name - Unique name of the agent pool profile in the context of the subscription and resource group. + Name *string `json:"name,omitempty"` + // Count - Number of agents (VMs) to host docker containers. Allowed values must be in the range of 1 to 100 (inclusive). The default value is 1. + Count *int32 `json:"count,omitempty"` + // VMSize - Size of agent VMs. Possible values include: 'StandardA0', 'StandardA1', 'StandardA2', 'StandardA3', 'StandardA4', 'StandardA5', 'StandardA6', 'StandardA7', 'StandardA8', 'StandardA9', 'StandardA10', 'StandardA11', 'StandardD1', 'StandardD2', 'StandardD3', 'StandardD4', 'StandardD11', 'StandardD12', 'StandardD13', 'StandardD14', 'StandardD1V2', 'StandardD2V2', 'StandardD3V2', 'StandardD4V2', 'StandardD5V2', 'StandardD11V2', 'StandardD12V2', 'StandardD13V2', 'StandardD14V2', 'StandardG1', 'StandardG2', 'StandardG3', 'StandardG4', 'StandardG5', 'StandardDS1', 'StandardDS2', 'StandardDS3', 'StandardDS4', 'StandardDS11', 'StandardDS12', 'StandardDS13', 'StandardDS14', 'StandardGS1', 'StandardGS2', 'StandardGS3', 'StandardGS4', 'StandardGS5' + VMSize ContainerServiceVMSizeTypes `json:"vmSize,omitempty"` + // DNSPrefix - DNS prefix to be used to create the FQDN for the agent pool. 
+ DNSPrefix *string `json:"dnsPrefix,omitempty"` + // Fqdn - FQDN for the agent pool. + Fqdn *string `json:"fqdn,omitempty"` } -// DiskInstanceView the instance view of the disk. -type DiskInstanceView struct { - // Name - The disk name. - Name *string `json:"name,omitempty"` - // EncryptionSettings - Specifies the encryption settings for the OS Disk.

Minimum api-version: 2015-06-15 - EncryptionSettings *[]DiskEncryptionSettings `json:"encryptionSettings,omitempty"` - // Statuses - The resource status information. - Statuses *[]InstanceViewStatus `json:"statuses,omitempty"` +// ContainerServiceCustomProfile properties to configure a custom container service cluster. +type ContainerServiceCustomProfile struct { + // Orchestrator - The name of the custom orchestrator to use. + Orchestrator *string `json:"orchestrator,omitempty"` } -// DiskList the List Disks operation response. -type DiskList struct { +// ContainerServiceDiagnosticsProfile ... +type ContainerServiceDiagnosticsProfile struct { + // VMDiagnostics - Profile for the container service VM diagnostic agent. + VMDiagnostics *ContainerServiceVMDiagnostics `json:"vmDiagnostics,omitempty"` +} + +// ContainerServiceLinuxProfile profile for Linux VMs in the container service cluster. +type ContainerServiceLinuxProfile struct { + // AdminUsername - The administrator username to use for Linux VMs. + AdminUsername *string `json:"adminUsername,omitempty"` + // SSH - The ssh key configuration for Linux VMs. + SSH *ContainerServiceSSHConfiguration `json:"ssh,omitempty"` +} + +// ContainerServiceListResult the response from the List Container Services operation. +type ContainerServiceListResult struct { autorest.Response `json:"-"` - // Value - A list of disks. - Value *[]Disk `json:"value,omitempty"` - // NextLink - The uri to fetch the next page of disks. Call ListNext() with this to fetch the next page of disks. + // Value - the list of container services. + Value *[]ContainerService `json:"value,omitempty"` + // NextLink - The URL to get the next set of container service results. NextLink *string `json:"nextLink,omitempty"` } -// DiskListIterator provides access to a complete listing of Disk values. -type DiskListIterator struct { +// ContainerServiceListResultIterator provides access to a complete listing of ContainerService values. 
+type ContainerServiceListResultIterator struct { i int - page DiskListPage + page ContainerServiceListResultPage } // Next advances to the next value. If there was an error making // the request the iterator does not advance and the error is returned. -func (iter *DiskListIterator) Next() error { +func (iter *ContainerServiceListResultIterator) Next() error { iter.i++ if iter.i < len(iter.page.Values()) { return nil @@ -1510,253 +1791,1325 @@ func (iter *DiskListIterator) Next() error { } // NotDone returns true if the enumeration should be started or is not yet complete. -func (iter DiskListIterator) NotDone() bool { +func (iter ContainerServiceListResultIterator) NotDone() bool { return iter.page.NotDone() && iter.i < len(iter.page.Values()) } // Response returns the raw server response from the last page request. -func (iter DiskListIterator) Response() DiskList { +func (iter ContainerServiceListResultIterator) Response() ContainerServiceListResult { return iter.page.Response() } // Value returns the current value or a zero-initialized value if the // iterator has advanced beyond the end of the collection. -func (iter DiskListIterator) Value() Disk { +func (iter ContainerServiceListResultIterator) Value() ContainerService { if !iter.page.NotDone() { - return Disk{} + return ContainerService{} } return iter.page.Values()[iter.i] } // IsEmpty returns true if the ListResult contains no values. -func (dl DiskList) IsEmpty() bool { - return dl.Value == nil || len(*dl.Value) == 0 +func (cslr ContainerServiceListResult) IsEmpty() bool { + return cslr.Value == nil || len(*cslr.Value) == 0 } -// diskListPreparer prepares a request to retrieve the next set of results. +// containerServiceListResultPreparer prepares a request to retrieve the next set of results. // It returns nil if no more results exist. 
-func (dl DiskList) diskListPreparer() (*http.Request, error) { - if dl.NextLink == nil || len(to.String(dl.NextLink)) < 1 { +func (cslr ContainerServiceListResult) containerServiceListResultPreparer() (*http.Request, error) { + if cslr.NextLink == nil || len(to.String(cslr.NextLink)) < 1 { return nil, nil } return autorest.Prepare(&http.Request{}, autorest.AsJSON(), autorest.AsGet(), - autorest.WithBaseURL(to.String(dl.NextLink))) + autorest.WithBaseURL(to.String(cslr.NextLink))) } -// DiskListPage contains a page of Disk values. -type DiskListPage struct { - fn func(DiskList) (DiskList, error) - dl DiskList +// ContainerServiceListResultPage contains a page of ContainerService values. +type ContainerServiceListResultPage struct { + fn func(ContainerServiceListResult) (ContainerServiceListResult, error) + cslr ContainerServiceListResult } // Next advances to the next page of values. If there was an error making // the request the page does not advance and the error is returned. -func (page *DiskListPage) Next() error { - next, err := page.fn(page.dl) +func (page *ContainerServiceListResultPage) Next() error { + next, err := page.fn(page.cslr) if err != nil { return err } - page.dl = next + page.cslr = next return nil } // NotDone returns true if the page enumeration should be started or is not yet complete. -func (page DiskListPage) NotDone() bool { - return !page.dl.IsEmpty() +func (page ContainerServiceListResultPage) NotDone() bool { + return !page.cslr.IsEmpty() } // Response returns the raw server response from the last page request. -func (page DiskListPage) Response() DiskList { - return page.dl +func (page ContainerServiceListResultPage) Response() ContainerServiceListResult { + return page.cslr } // Values returns the slice of values for the current page or nil if there are no values. 
-func (page DiskListPage) Values() []Disk { - if page.dl.IsEmpty() { +func (page ContainerServiceListResultPage) Values() []ContainerService { + if page.cslr.IsEmpty() { return nil } - return *page.dl.Value + return *page.cslr.Value } -// DiskProperties disk resource properties. -type DiskProperties struct { - // TimeCreated - The time when the disk was created. - TimeCreated *date.Time `json:"timeCreated,omitempty"` - // OsType - The Operating System type. Possible values include: 'Windows', 'Linux' - OsType OperatingSystemTypes `json:"osType,omitempty"` - // CreationData - Disk source information. CreationData information cannot be changed after the disk has been created. - CreationData *CreationData `json:"creationData,omitempty"` - // DiskSizeGB - If creationData.createOption is Empty, this field is mandatory and it indicates the size of the VHD to create. If this field is present for updates or creation with other options, it indicates a resize. Resizes are only allowed if the disk is not attached to a running VM, and can only increase the disk's size. - DiskSizeGB *int32 `json:"diskSizeGB,omitempty"` - // EncryptionSettings - Encryption settings for disk or snapshot - EncryptionSettings *EncryptionSettings `json:"encryptionSettings,omitempty"` - // ProvisioningState - The disk provisioning state. - ProvisioningState *string `json:"provisioningState,omitempty"` +// ContainerServiceMasterProfile profile for the container service master. +type ContainerServiceMasterProfile struct { + // Count - Number of masters (VMs) in the container service cluster. Allowed values are 1, 3, and 5. The default value is 1. + Count *int32 `json:"count,omitempty"` + // DNSPrefix - DNS prefix to be used to create the FQDN for master. + DNSPrefix *string `json:"dnsPrefix,omitempty"` + // Fqdn - FQDN for the master. + Fqdn *string `json:"fqdn,omitempty"` } -// DisksCreateOrUpdateFuture an abstraction for monitoring and retrieving the results of a long-running operation. 
-type DisksCreateOrUpdateFuture struct { +// ContainerServiceOrchestratorProfile profile for the container service orchestrator. +type ContainerServiceOrchestratorProfile struct { + // OrchestratorType - The orchestrator to use to manage container service cluster resources. Valid values are Swarm, DCOS, and Custom. Possible values include: 'Swarm', 'DCOS', 'Custom', 'Kubernetes' + OrchestratorType ContainerServiceOrchestratorTypes `json:"orchestratorType,omitempty"` +} + +// ContainerServiceProperties properties of the container service. +type ContainerServiceProperties struct { + // ProvisioningState - the current deployment or provisioning state, which only appears in the response. + ProvisioningState *string `json:"provisioningState,omitempty"` + // OrchestratorProfile - Properties of the orchestrator. + OrchestratorProfile *ContainerServiceOrchestratorProfile `json:"orchestratorProfile,omitempty"` + // CustomProfile - Properties for custom clusters. + CustomProfile *ContainerServiceCustomProfile `json:"customProfile,omitempty"` + // ServicePrincipalProfile - Properties for cluster service principals. + ServicePrincipalProfile *ContainerServiceServicePrincipalProfile `json:"servicePrincipalProfile,omitempty"` + // MasterProfile - Properties of master agents. + MasterProfile *ContainerServiceMasterProfile `json:"masterProfile,omitempty"` + // AgentPoolProfiles - Properties of the agent pool. + AgentPoolProfiles *[]ContainerServiceAgentPoolProfile `json:"agentPoolProfiles,omitempty"` + // WindowsProfile - Properties of Windows VMs. + WindowsProfile *ContainerServiceWindowsProfile `json:"windowsProfile,omitempty"` + // LinuxProfile - Properties of Linux VMs. + LinuxProfile *ContainerServiceLinuxProfile `json:"linuxProfile,omitempty"` + // DiagnosticsProfile - Properties of the diagnostic agent. 
+ DiagnosticsProfile *ContainerServiceDiagnosticsProfile `json:"diagnosticsProfile,omitempty"` +} + +// ContainerServicesCreateOrUpdateFuture an abstraction for monitoring and retrieving the results of a long-running +// operation. +type ContainerServicesCreateOrUpdateFuture struct { azure.Future } // Result returns the result of the asynchronous operation. // If the operation has not completed it will return an error. -func (future *DisksCreateOrUpdateFuture) Result(client DisksClient) (d Disk, err error) { +func (future *ContainerServicesCreateOrUpdateFuture) Result(client ContainerServicesClient) (cs ContainerService, err error) { var done bool done, err = future.Done(client) if err != nil { - err = autorest.NewErrorWithError(err, "compute.DisksCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") + err = autorest.NewErrorWithError(err, "compute.ContainerServicesCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") return } if !done { - err = azure.NewAsyncOpIncompleteError("compute.DisksCreateOrUpdateFuture") + err = azure.NewAsyncOpIncompleteError("compute.ContainerServicesCreateOrUpdateFuture") return } sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - if d.Response.Response, err = future.GetResult(sender); err == nil && d.Response.Response.StatusCode != http.StatusNoContent { - d, err = client.CreateOrUpdateResponder(d.Response.Response) + if cs.Response.Response, err = future.GetResult(sender); err == nil && cs.Response.Response.StatusCode != http.StatusNoContent { + cs, err = client.CreateOrUpdateResponder(cs.Response.Response) if err != nil { - err = autorest.NewErrorWithError(err, "compute.DisksCreateOrUpdateFuture", "Result", d.Response.Response, "Failure responding to request") + err = autorest.NewErrorWithError(err, "compute.ContainerServicesCreateOrUpdateFuture", "Result", cs.Response.Response, "Failure responding 
to request") } } return } -// DisksDeleteFuture an abstraction for monitoring and retrieving the results of a long-running operation. -type DisksDeleteFuture struct { +// ContainerServicesDeleteFuture an abstraction for monitoring and retrieving the results of a long-running +// operation. +type ContainerServicesDeleteFuture struct { azure.Future } // Result returns the result of the asynchronous operation. // If the operation has not completed it will return an error. -func (future *DisksDeleteFuture) Result(client DisksClient) (ar autorest.Response, err error) { +func (future *ContainerServicesDeleteFuture) Result(client ContainerServicesClient) (ar autorest.Response, err error) { var done bool done, err = future.Done(client) if err != nil { - err = autorest.NewErrorWithError(err, "compute.DisksDeleteFuture", "Result", future.Response(), "Polling failure") + err = autorest.NewErrorWithError(err, "compute.ContainerServicesDeleteFuture", "Result", future.Response(), "Polling failure") return } if !done { - err = azure.NewAsyncOpIncompleteError("compute.DisksDeleteFuture") + err = azure.NewAsyncOpIncompleteError("compute.ContainerServicesDeleteFuture") return } ar.Response = future.Response() return } -// DisksGrantAccessFuture an abstraction for monitoring and retrieving the results of a long-running operation. -type DisksGrantAccessFuture struct { - azure.Future +// ContainerServiceServicePrincipalProfile information about a service principal identity for the cluster to use +// for manipulating Azure APIs. +type ContainerServiceServicePrincipalProfile struct { + // ClientID - The ID for the service principal. + ClientID *string `json:"clientId,omitempty"` + // Secret - The secret password associated with the service principal. + Secret *string `json:"secret,omitempty"` } -// Result returns the result of the asynchronous operation. -// If the operation has not completed it will return an error. 
-func (future *DisksGrantAccessFuture) Result(client DisksClient) (au AccessURI, err error) { - var done bool - done, err = future.Done(client) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.DisksGrantAccessFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - err = azure.NewAsyncOpIncompleteError("compute.DisksGrantAccessFuture") - return +// ContainerServiceSSHConfiguration SSH configuration for Linux-based VMs running on Azure. +type ContainerServiceSSHConfiguration struct { + // PublicKeys - the list of SSH public keys used to authenticate with Linux-based VMs. + PublicKeys *[]ContainerServiceSSHPublicKey `json:"publicKeys,omitempty"` +} + +// ContainerServiceSSHPublicKey contains information about SSH certificate public key data. +type ContainerServiceSSHPublicKey struct { + // KeyData - Certificate public key used to authenticate with VMs through SSH. The certificate must be in PEM format with or without headers. + KeyData *string `json:"keyData,omitempty"` +} + +// ContainerServiceVMDiagnostics profile for diagnostics on the container service VMs. +type ContainerServiceVMDiagnostics struct { + // Enabled - Whether the VM diagnostic agent is provisioned on the VM. + Enabled *bool `json:"enabled,omitempty"` + // StorageURI - The URI of the storage account where diagnostics are stored. + StorageURI *string `json:"storageUri,omitempty"` +} + +// ContainerServiceWindowsProfile profile for Windows VMs in the container service cluster. +type ContainerServiceWindowsProfile struct { + // AdminUsername - The administrator username to use for Windows VMs. + AdminUsername *string `json:"adminUsername,omitempty"` + // AdminPassword - The administrator password to use for Windows VMs. + AdminPassword *string `json:"adminPassword,omitempty"` +} + +// CreationData data used when creating a disk. +type CreationData struct { + // CreateOption - This enumerates the possible sources of a disk's creation. 
Possible values include: 'Empty', 'Attach', 'FromImage', 'Import', 'Copy', 'Restore', 'Upload' + CreateOption DiskCreateOption `json:"createOption,omitempty"` + // StorageAccountID - If createOption is Import, the Azure Resource Manager identifier of the storage account containing the blob to import as a disk. Required only if the blob is in a different subscription + StorageAccountID *string `json:"storageAccountId,omitempty"` + // ImageReference - Disk source information. + ImageReference *ImageDiskReference `json:"imageReference,omitempty"` + // SourceURI - If createOption is Import, this is the URI of a blob to be imported into a managed disk. + SourceURI *string `json:"sourceUri,omitempty"` + // SourceResourceID - If createOption is Copy, this is the ARM id of the source snapshot or disk. + SourceResourceID *string `json:"sourceResourceId,omitempty"` +} + +// DataDisk describes a data disk. +type DataDisk struct { + // Lun - Specifies the logical unit number of the data disk. This value is used to identify data disks within the VM and therefore must be unique for each data disk attached to a VM. + Lun *int32 `json:"lun,omitempty"` + // Name - The disk name. + Name *string `json:"name,omitempty"` + // Vhd - The virtual hard disk. + Vhd *VirtualHardDisk `json:"vhd,omitempty"` + // Image - The source user image virtual hard disk. The virtual hard disk will be copied before being attached to the virtual machine. If SourceImage is provided, the destination virtual hard drive must not exist. + Image *VirtualHardDisk `json:"image,omitempty"` + // Caching - Specifies the caching requirements.

Possible values are:

**None**

**ReadOnly**

**ReadWrite**

Default: **None for Standard storage. ReadOnly for Premium storage**. Possible values include: 'CachingTypesNone', 'CachingTypesReadOnly', 'CachingTypesReadWrite' + Caching CachingTypes `json:"caching,omitempty"` + // WriteAcceleratorEnabled - Specifies whether writeAccelerator should be enabled or disabled on the disk. + WriteAcceleratorEnabled *bool `json:"writeAcceleratorEnabled,omitempty"` + // CreateOption - Specifies how the virtual machine should be created.

Possible values are:

**Attach** \u2013 This value is used when you are using a specialized disk to create the virtual machine.

**FromImage** \u2013 This value is used when you are using an image to create the virtual machine. If you are using a platform image, you also use the imageReference element described above. If you are using a marketplace image, you also use the plan element previously described. Possible values include: 'DiskCreateOptionTypesFromImage', 'DiskCreateOptionTypesEmpty', 'DiskCreateOptionTypesAttach' + CreateOption DiskCreateOptionTypes `json:"createOption,omitempty"` + // DiskSizeGB - Specifies the size of an empty data disk in gigabytes. This element can be used to overwrite the size of the disk in a virtual machine image.

This value cannot be larger than 1023 GB + DiskSizeGB *int32 `json:"diskSizeGB,omitempty"` + // ManagedDisk - The managed disk parameters. + ManagedDisk *ManagedDiskParameters `json:"managedDisk,omitempty"` +} + +// DataDiskImage contains the data disk images information. +type DataDiskImage struct { + // Lun - Specifies the logical unit number of the data disk. This value is used to identify data disks within the VM and therefore must be unique for each data disk attached to a VM. + Lun *int32 `json:"lun,omitempty"` +} + +// DiagnosticsProfile specifies the boot diagnostic settings state.

Minimum api-version: 2015-06-15. +type DiagnosticsProfile struct { + // BootDiagnostics - Boot Diagnostics is a debugging feature which allows you to view Console Output and Screenshot to diagnose VM status.

You can easily view the output of your console log.

Azure also enables you to see a screenshot of the VM from the hypervisor. + BootDiagnostics *BootDiagnostics `json:"bootDiagnostics,omitempty"` +} + +// DiffDiskSettings describes the parameters of ephemeral disk settings that can be specified for operating system +// disk.

NOTE: The ephemeral disk settings can only be specified for managed disk. +type DiffDiskSettings struct { + // Option - Specifies the ephemeral disk settings for operating system disk. Possible values include: 'Local' + Option DiffDiskOptions `json:"option,omitempty"` +} + +// Disallowed describes the disallowed disk types. +type Disallowed struct { + // DiskTypes - A list of disk types. + DiskTypes *[]string `json:"diskTypes,omitempty"` +} + +// Disk disk resource. +type Disk struct { + autorest.Response `json:"-"` + // ManagedBy - A relative URI containing the ID of the VM that has the disk attached. + ManagedBy *string `json:"managedBy,omitempty"` + Sku *DiskSku `json:"sku,omitempty"` + // Zones - The Logical zone list for Disk. + Zones *[]string `json:"zones,omitempty"` + *DiskProperties `json:"properties,omitempty"` + // ID - Resource Id + ID *string `json:"id,omitempty"` + // Name - Resource name + Name *string `json:"name,omitempty"` + // Type - Resource type + Type *string `json:"type,omitempty"` + // Location - Resource location + Location *string `json:"location,omitempty"` + // Tags - Resource tags + Tags map[string]*string `json:"tags"` +} + +// MarshalJSON is the custom marshaler for Disk. +func (d Disk) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if d.ManagedBy != nil { + objectMap["managedBy"] = d.ManagedBy + } + if d.Sku != nil { + objectMap["sku"] = d.Sku + } + if d.Zones != nil { + objectMap["zones"] = d.Zones + } + if d.DiskProperties != nil { + objectMap["properties"] = d.DiskProperties + } + if d.ID != nil { + objectMap["id"] = d.ID + } + if d.Name != nil { + objectMap["name"] = d.Name + } + if d.Type != nil { + objectMap["type"] = d.Type + } + if d.Location != nil { + objectMap["location"] = d.Location + } + if d.Tags != nil { + objectMap["tags"] = d.Tags + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for Disk struct. 
+func (d *Disk) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "managedBy": + if v != nil { + var managedBy string + err = json.Unmarshal(*v, &managedBy) + if err != nil { + return err + } + d.ManagedBy = &managedBy + } + case "sku": + if v != nil { + var sku DiskSku + err = json.Unmarshal(*v, &sku) + if err != nil { + return err + } + d.Sku = &sku + } + case "zones": + if v != nil { + var zones []string + err = json.Unmarshal(*v, &zones) + if err != nil { + return err + } + d.Zones = &zones + } + case "properties": + if v != nil { + var diskProperties DiskProperties + err = json.Unmarshal(*v, &diskProperties) + if err != nil { + return err + } + d.DiskProperties = &diskProperties + } + case "id": + if v != nil { + var ID string + err = json.Unmarshal(*v, &ID) + if err != nil { + return err + } + d.ID = &ID + } + case "name": + if v != nil { + var name string + err = json.Unmarshal(*v, &name) + if err != nil { + return err + } + d.Name = &name + } + case "type": + if v != nil { + var typeVar string + err = json.Unmarshal(*v, &typeVar) + if err != nil { + return err + } + d.Type = &typeVar + } + case "location": + if v != nil { + var location string + err = json.Unmarshal(*v, &location) + if err != nil { + return err + } + d.Location = &location + } + case "tags": + if v != nil { + var tags map[string]*string + err = json.Unmarshal(*v, &tags) + if err != nil { + return err + } + d.Tags = tags + } + } + } + + return nil +} + +// DiskEncryptionSettings describes a Encryption Settings for a Disk +type DiskEncryptionSettings struct { + // DiskEncryptionKey - Specifies the location of the disk encryption key, which is a Key Vault Secret. + DiskEncryptionKey *KeyVaultSecretReference `json:"diskEncryptionKey,omitempty"` + // KeyEncryptionKey - Specifies the location of the key encryption key in Key Vault. 
+ KeyEncryptionKey *KeyVaultKeyReference `json:"keyEncryptionKey,omitempty"` + // Enabled - Specifies whether disk encryption should be enabled on the virtual machine. + Enabled *bool `json:"enabled,omitempty"` +} + +// DiskInstanceView the instance view of the disk. +type DiskInstanceView struct { + // Name - The disk name. + Name *string `json:"name,omitempty"` + // EncryptionSettings - Specifies the encryption settings for the OS Disk.

Minimum api-version: 2015-06-15 + EncryptionSettings *[]DiskEncryptionSettings `json:"encryptionSettings,omitempty"` + // Statuses - The resource status information. + Statuses *[]InstanceViewStatus `json:"statuses,omitempty"` +} + +// DiskList the List Disks operation response. +type DiskList struct { + autorest.Response `json:"-"` + // Value - A list of disks. + Value *[]Disk `json:"value,omitempty"` + // NextLink - The uri to fetch the next page of disks. Call ListNext() with this to fetch the next page of disks. + NextLink *string `json:"nextLink,omitempty"` +} + +// DiskListIterator provides access to a complete listing of Disk values. +type DiskListIterator struct { + i int + page DiskListPage +} + +// Next advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +func (iter *DiskListIterator) Next() error { + iter.i++ + if iter.i < len(iter.page.Values()) { + return nil + } + err := iter.page.Next() + if err != nil { + iter.i-- + return err + } + iter.i = 0 + return nil +} + +// NotDone returns true if the enumeration should be started or is not yet complete. +func (iter DiskListIterator) NotDone() bool { + return iter.page.NotDone() && iter.i < len(iter.page.Values()) +} + +// Response returns the raw server response from the last page request. +func (iter DiskListIterator) Response() DiskList { + return iter.page.Response() +} + +// Value returns the current value or a zero-initialized value if the +// iterator has advanced beyond the end of the collection. +func (iter DiskListIterator) Value() Disk { + if !iter.page.NotDone() { + return Disk{} + } + return iter.page.Values()[iter.i] +} + +// IsEmpty returns true if the ListResult contains no values. +func (dl DiskList) IsEmpty() bool { + return dl.Value == nil || len(*dl.Value) == 0 +} + +// diskListPreparer prepares a request to retrieve the next set of results. +// It returns nil if no more results exist. 
+func (dl DiskList) diskListPreparer() (*http.Request, error) { + if dl.NextLink == nil || len(to.String(dl.NextLink)) < 1 { + return nil, nil + } + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(dl.NextLink))) +} + +// DiskListPage contains a page of Disk values. +type DiskListPage struct { + fn func(DiskList) (DiskList, error) + dl DiskList +} + +// Next advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +func (page *DiskListPage) Next() error { + next, err := page.fn(page.dl) + if err != nil { + return err + } + page.dl = next + return nil +} + +// NotDone returns true if the page enumeration should be started or is not yet complete. +func (page DiskListPage) NotDone() bool { + return !page.dl.IsEmpty() +} + +// Response returns the raw server response from the last page request. +func (page DiskListPage) Response() DiskList { + return page.dl +} + +// Values returns the slice of values for the current page or nil if there are no values. +func (page DiskListPage) Values() []Disk { + if page.dl.IsEmpty() { + return nil + } + return *page.dl.Value +} + +// DiskProperties disk resource properties. +type DiskProperties struct { + // TimeCreated - The time when the disk was created. + TimeCreated *date.Time `json:"timeCreated,omitempty"` + // OsType - The Operating System type. Possible values include: 'Windows', 'Linux' + OsType OperatingSystemTypes `json:"osType,omitempty"` + // HyperVGeneration - The hypervisor generation of the Virtual Machine. Applicable to OS disks only. Possible values include: 'V1', 'V2' + HyperVGeneration HyperVGeneration `json:"hyperVGeneration,omitempty"` + // CreationData - Disk source information. CreationData information cannot be changed after the disk has been created. 
+ CreationData *CreationData `json:"creationData,omitempty"` + // DiskSizeGB - If creationData.createOption is Empty, this field is mandatory and it indicates the size of the VHD to create. If this field is present for updates or creation with other options, it indicates a resize. Resizes are only allowed if the disk is not attached to a running VM, and can only increase the disk's size. + DiskSizeGB *int32 `json:"diskSizeGB,omitempty"` + // EncryptionSettingsCollection - Encryption settings collection used for Azure Disk Encryption, can contain multiple encryption settings per disk or snapshot. + EncryptionSettingsCollection *EncryptionSettingsCollection `json:"encryptionSettingsCollection,omitempty"` + // ProvisioningState - The disk provisioning state. + ProvisioningState *string `json:"provisioningState,omitempty"` + // DiskIOPSReadWrite - The number of IOPS allowed for this disk; only settable for UltraSSD disks. One operation can transfer between 4k and 256k bytes. + DiskIOPSReadWrite *int64 `json:"diskIOPSReadWrite,omitempty"` + // DiskMBpsReadWrite - The bandwidth allowed for this disk; only settable for UltraSSD disks. MBps means millions of bytes per second - MB here uses the ISO notation, of powers of 10. + DiskMBpsReadWrite *int32 `json:"diskMBpsReadWrite,omitempty"` + // DiskState - The state of the disk. Possible values include: 'Unattached', 'Attached', 'Reserved', 'ActiveSAS', 'ReadyToUpload', 'ActiveUpload' + DiskState DiskState `json:"diskState,omitempty"` +} + +// DisksCreateOrUpdateFuture an abstraction for monitoring and retrieving the results of a long-running operation. +type DisksCreateOrUpdateFuture struct { + azure.Future +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. 
+func (future *DisksCreateOrUpdateFuture) Result(client DisksClient) (d Disk, err error) { + var done bool + done, err = future.Done(client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.DisksCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.DisksCreateOrUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if d.Response.Response, err = future.GetResult(sender); err == nil && d.Response.Response.StatusCode != http.StatusNoContent { + d, err = client.CreateOrUpdateResponder(d.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.DisksCreateOrUpdateFuture", "Result", d.Response.Response, "Failure responding to request") + } + } + return +} + +// DisksDeleteFuture an abstraction for monitoring and retrieving the results of a long-running operation. +type DisksDeleteFuture struct { + azure.Future +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. +func (future *DisksDeleteFuture) Result(client DisksClient) (ar autorest.Response, err error) { + var done bool + done, err = future.Done(client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.DisksDeleteFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.DisksDeleteFuture") + return + } + ar.Response = future.Response() + return +} + +// DisksGrantAccessFuture an abstraction for monitoring and retrieving the results of a long-running operation. +type DisksGrantAccessFuture struct { + azure.Future +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. 
+func (future *DisksGrantAccessFuture) Result(client DisksClient) (au AccessURI, err error) { + var done bool + done, err = future.Done(client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.DisksGrantAccessFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.DisksGrantAccessFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if au.Response.Response, err = future.GetResult(sender); err == nil && au.Response.Response.StatusCode != http.StatusNoContent { + au, err = client.GrantAccessResponder(au.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.DisksGrantAccessFuture", "Result", au.Response.Response, "Failure responding to request") + } + } + return +} + +// DiskSku the disks sku name. Can be Standard_LRS, Premium_LRS, StandardSSD_LRS, or UltraSSD_LRS. +type DiskSku struct { + // Name - The sku name. Possible values include: 'StandardLRS', 'PremiumLRS', 'StandardSSDLRS', 'UltraSSDLRS' + Name DiskStorageAccountTypes `json:"name,omitempty"` + // Tier - The sku tier. + Tier *string `json:"tier,omitempty"` +} + +// DisksRevokeAccessFuture an abstraction for monitoring and retrieving the results of a long-running operation. +type DisksRevokeAccessFuture struct { + azure.Future +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. 
+func (future *DisksRevokeAccessFuture) Result(client DisksClient) (ar autorest.Response, err error) { + var done bool + done, err = future.Done(client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.DisksRevokeAccessFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.DisksRevokeAccessFuture") + return + } + ar.Response = future.Response() + return +} + +// DisksUpdateFuture an abstraction for monitoring and retrieving the results of a long-running operation. +type DisksUpdateFuture struct { + azure.Future +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. +func (future *DisksUpdateFuture) Result(client DisksClient) (d Disk, err error) { + var done bool + done, err = future.Done(client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.DisksUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.DisksUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if d.Response.Response, err = future.GetResult(sender); err == nil && d.Response.Response.StatusCode != http.StatusNoContent { + d, err = client.UpdateResponder(d.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.DisksUpdateFuture", "Result", d.Response.Response, "Failure responding to request") + } + } + return +} + +// DiskUpdate disk update resource. +type DiskUpdate struct { + *DiskUpdateProperties `json:"properties,omitempty"` + // Tags - Resource tags + Tags map[string]*string `json:"tags"` + Sku *DiskSku `json:"sku,omitempty"` +} + +// MarshalJSON is the custom marshaler for DiskUpdate. 
+func (du DiskUpdate) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if du.DiskUpdateProperties != nil { + objectMap["properties"] = du.DiskUpdateProperties + } + if du.Tags != nil { + objectMap["tags"] = du.Tags + } + if du.Sku != nil { + objectMap["sku"] = du.Sku + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for DiskUpdate struct. +func (du *DiskUpdate) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "properties": + if v != nil { + var diskUpdateProperties DiskUpdateProperties + err = json.Unmarshal(*v, &diskUpdateProperties) + if err != nil { + return err + } + du.DiskUpdateProperties = &diskUpdateProperties + } + case "tags": + if v != nil { + var tags map[string]*string + err = json.Unmarshal(*v, &tags) + if err != nil { + return err + } + du.Tags = tags + } + case "sku": + if v != nil { + var sku DiskSku + err = json.Unmarshal(*v, &sku) + if err != nil { + return err + } + du.Sku = &sku + } + } + } + + return nil +} + +// DiskUpdateProperties disk resource update properties. +type DiskUpdateProperties struct { + // OsType - the Operating System type. Possible values include: 'Windows', 'Linux' + OsType OperatingSystemTypes `json:"osType,omitempty"` + // DiskSizeGB - If creationData.createOption is Empty, this field is mandatory and it indicates the size of the VHD to create. If this field is present for updates or creation with other options, it indicates a resize. Resizes are only allowed if the disk is not attached to a running VM, and can only increase the disk's size. + DiskSizeGB *int32 `json:"diskSizeGB,omitempty"` + // EncryptionSettingsCollection - Encryption settings collection used be Azure Disk Encryption, can contain multiple encryption settings per disk or snapshot. 
+ EncryptionSettingsCollection *EncryptionSettingsCollection `json:"encryptionSettingsCollection,omitempty"` + // DiskIOPSReadWrite - The number of IOPS allowed for this disk; only settable for UltraSSD disks. One operation can transfer between 4k and 256k bytes. + DiskIOPSReadWrite *int64 `json:"diskIOPSReadWrite,omitempty"` + // DiskMBpsReadWrite - The bandwidth allowed for this disk; only settable for UltraSSD disks. MBps means millions of bytes per second - MB here uses the ISO notation, of powers of 10. + DiskMBpsReadWrite *int32 `json:"diskMBpsReadWrite,omitempty"` +} + +// EncryptionSettingsCollection encryption settings for disk or snapshot +type EncryptionSettingsCollection struct { + // Enabled - Set this flag to true and provide DiskEncryptionKey and optional KeyEncryptionKey to enable encryption. Set this flag to false and remove DiskEncryptionKey and KeyEncryptionKey to disable encryption. If EncryptionSettings is null in the request object, the existing settings remain unchanged. + Enabled *bool `json:"enabled,omitempty"` + // EncryptionSettings - A collection of encryption settings, one for each disk volume. + EncryptionSettings *[]EncryptionSettingsElement `json:"encryptionSettings,omitempty"` +} + +// EncryptionSettingsElement encryption settings for one disk volume. +type EncryptionSettingsElement struct { + // DiskEncryptionKey - Key Vault Secret Url and vault id of the disk encryption key + DiskEncryptionKey *KeyVaultAndSecretReference `json:"diskEncryptionKey,omitempty"` + // KeyEncryptionKey - Key Vault Key Url and vault id of the key encryption key. KeyEncryptionKey is optional and when provided is used to unwrap the disk encryption key. + KeyEncryptionKey *KeyVaultAndKeyReference `json:"keyEncryptionKey,omitempty"` +} + +// GalleriesCreateOrUpdateFuture an abstraction for monitoring and retrieving the results of a long-running +// operation. 
+type GalleriesCreateOrUpdateFuture struct { + azure.Future +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. +func (future *GalleriesCreateOrUpdateFuture) Result(client GalleriesClient) (g Gallery, err error) { + var done bool + done, err = future.Done(client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.GalleriesCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.GalleriesCreateOrUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if g.Response.Response, err = future.GetResult(sender); err == nil && g.Response.Response.StatusCode != http.StatusNoContent { + g, err = client.CreateOrUpdateResponder(g.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.GalleriesCreateOrUpdateFuture", "Result", g.Response.Response, "Failure responding to request") + } + } + return +} + +// GalleriesDeleteFuture an abstraction for monitoring and retrieving the results of a long-running operation. +type GalleriesDeleteFuture struct { + azure.Future +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. 
+func (future *GalleriesDeleteFuture) Result(client GalleriesClient) (ar autorest.Response, err error) { + var done bool + done, err = future.Done(client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.GalleriesDeleteFuture", "Result", future.Response(), "Polling failure") + return } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - if au.Response.Response, err = future.GetResult(sender); err == nil && au.Response.Response.StatusCode != http.StatusNoContent { - au, err = client.GrantAccessResponder(au.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.DisksGrantAccessFuture", "Result", au.Response.Response, "Failure responding to request") + if !done { + err = azure.NewAsyncOpIncompleteError("compute.GalleriesDeleteFuture") + return + } + ar.Response = future.Response() + return +} + +// Gallery specifies information about the Shared Image Gallery that you want to create or update. +type Gallery struct { + autorest.Response `json:"-"` + *GalleryProperties `json:"properties,omitempty"` + // ID - Resource Id + ID *string `json:"id,omitempty"` + // Name - Resource name + Name *string `json:"name,omitempty"` + // Type - Resource type + Type *string `json:"type,omitempty"` + // Location - Resource location + Location *string `json:"location,omitempty"` + // Tags - Resource tags + Tags map[string]*string `json:"tags"` +} + +// MarshalJSON is the custom marshaler for Gallery. 
+func (g Gallery) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if g.GalleryProperties != nil { + objectMap["properties"] = g.GalleryProperties + } + if g.ID != nil { + objectMap["id"] = g.ID + } + if g.Name != nil { + objectMap["name"] = g.Name + } + if g.Type != nil { + objectMap["type"] = g.Type + } + if g.Location != nil { + objectMap["location"] = g.Location + } + if g.Tags != nil { + objectMap["tags"] = g.Tags + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for Gallery struct. +func (g *Gallery) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "properties": + if v != nil { + var galleryProperties GalleryProperties + err = json.Unmarshal(*v, &galleryProperties) + if err != nil { + return err + } + g.GalleryProperties = &galleryProperties + } + case "id": + if v != nil { + var ID string + err = json.Unmarshal(*v, &ID) + if err != nil { + return err + } + g.ID = &ID + } + case "name": + if v != nil { + var name string + err = json.Unmarshal(*v, &name) + if err != nil { + return err + } + g.Name = &name + } + case "type": + if v != nil { + var typeVar string + err = json.Unmarshal(*v, &typeVar) + if err != nil { + return err + } + g.Type = &typeVar + } + case "location": + if v != nil { + var location string + err = json.Unmarshal(*v, &location) + if err != nil { + return err + } + g.Location = &location + } + case "tags": + if v != nil { + var tags map[string]*string + err = json.Unmarshal(*v, &tags) + if err != nil { + return err + } + g.Tags = tags + } + } + } + + return nil +} + +// GalleryArtifactPublishingProfileBase describes the basic gallery artifact publishing profile. +type GalleryArtifactPublishingProfileBase struct { + // TargetRegions - The target regions where the Image Version is going to be replicated to. This property is updatable. 
+ TargetRegions *[]TargetRegion `json:"targetRegions,omitempty"` + Source *GalleryArtifactSource `json:"source,omitempty"` +} + +// GalleryArtifactSource the source image from which the Image Version is going to be created. +type GalleryArtifactSource struct { + ManagedImage *ManagedArtifact `json:"managedImage,omitempty"` +} + +// GalleryDataDiskImage this is the data disk image. +type GalleryDataDiskImage struct { + // Lun - This property specifies the logical unit number of the data disk. This value is used to identify data disks within the Virtual Machine and therefore must be unique for each data disk attached to the Virtual Machine. + Lun *int32 `json:"lun,omitempty"` + // SizeInGB - This property indicates the size of the VHD to be created. + SizeInGB *int32 `json:"sizeInGB,omitempty"` + // HostCaching - The host caching of the disk. Valid values are 'None', 'ReadOnly', and 'ReadWrite'. Possible values include: 'HostCachingNone', 'HostCachingReadOnly', 'HostCachingReadWrite' + HostCaching HostCaching `json:"hostCaching,omitempty"` +} + +// GalleryDiskImage this is the disk image base class. +type GalleryDiskImage struct { + // SizeInGB - This property indicates the size of the VHD to be created. + SizeInGB *int32 `json:"sizeInGB,omitempty"` + // HostCaching - The host caching of the disk. Valid values are 'None', 'ReadOnly', and 'ReadWrite'. Possible values include: 'HostCachingNone', 'HostCachingReadOnly', 'HostCachingReadWrite' + HostCaching HostCaching `json:"hostCaching,omitempty"` +} + +// GalleryIdentifier describes the gallery unique name. +type GalleryIdentifier struct { + // UniqueName - The unique name of the Shared Image Gallery. This name is generated automatically by Azure. + UniqueName *string `json:"uniqueName,omitempty"` +} + +// GalleryImage specifies information about the gallery Image Definition that you want to create or update. 
+type GalleryImage struct { + autorest.Response `json:"-"` + *GalleryImageProperties `json:"properties,omitempty"` + // ID - Resource Id + ID *string `json:"id,omitempty"` + // Name - Resource name + Name *string `json:"name,omitempty"` + // Type - Resource type + Type *string `json:"type,omitempty"` + // Location - Resource location + Location *string `json:"location,omitempty"` + // Tags - Resource tags + Tags map[string]*string `json:"tags"` +} + +// MarshalJSON is the custom marshaler for GalleryImage. +func (gi GalleryImage) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if gi.GalleryImageProperties != nil { + objectMap["properties"] = gi.GalleryImageProperties + } + if gi.ID != nil { + objectMap["id"] = gi.ID + } + if gi.Name != nil { + objectMap["name"] = gi.Name + } + if gi.Type != nil { + objectMap["type"] = gi.Type + } + if gi.Location != nil { + objectMap["location"] = gi.Location + } + if gi.Tags != nil { + objectMap["tags"] = gi.Tags + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for GalleryImage struct. 
+func (gi *GalleryImage) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "properties": + if v != nil { + var galleryImageProperties GalleryImageProperties + err = json.Unmarshal(*v, &galleryImageProperties) + if err != nil { + return err + } + gi.GalleryImageProperties = &galleryImageProperties + } + case "id": + if v != nil { + var ID string + err = json.Unmarshal(*v, &ID) + if err != nil { + return err + } + gi.ID = &ID + } + case "name": + if v != nil { + var name string + err = json.Unmarshal(*v, &name) + if err != nil { + return err + } + gi.Name = &name + } + case "type": + if v != nil { + var typeVar string + err = json.Unmarshal(*v, &typeVar) + if err != nil { + return err + } + gi.Type = &typeVar + } + case "location": + if v != nil { + var location string + err = json.Unmarshal(*v, &location) + if err != nil { + return err + } + gi.Location = &location + } + case "tags": + if v != nil { + var tags map[string]*string + err = json.Unmarshal(*v, &tags) + if err != nil { + return err + } + gi.Tags = tags + } } } - return + + return nil +} + +// GalleryImageIdentifier this is the gallery Image Definition identifier. +type GalleryImageIdentifier struct { + // Publisher - The name of the gallery Image Definition publisher. + Publisher *string `json:"publisher,omitempty"` + // Offer - The name of the gallery Image Definition offer. + Offer *string `json:"offer,omitempty"` + // Sku - The name of the gallery Image Definition SKU. + Sku *string `json:"sku,omitempty"` +} + +// GalleryImageList the List Gallery Images operation response. +type GalleryImageList struct { + autorest.Response `json:"-"` + // Value - A list of Shared Image Gallery images. + Value *[]GalleryImage `json:"value,omitempty"` + // NextLink - The uri to fetch the next page of Image Definitions in the Shared Image Gallery. 
Call ListNext() with this to fetch the next page of gallery Image Definitions. + NextLink *string `json:"nextLink,omitempty"` +} + +// GalleryImageListIterator provides access to a complete listing of GalleryImage values. +type GalleryImageListIterator struct { + i int + page GalleryImageListPage +} + +// Next advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +func (iter *GalleryImageListIterator) Next() error { + iter.i++ + if iter.i < len(iter.page.Values()) { + return nil + } + err := iter.page.Next() + if err != nil { + iter.i-- + return err + } + iter.i = 0 + return nil +} + +// NotDone returns true if the enumeration should be started or is not yet complete. +func (iter GalleryImageListIterator) NotDone() bool { + return iter.page.NotDone() && iter.i < len(iter.page.Values()) +} + +// Response returns the raw server response from the last page request. +func (iter GalleryImageListIterator) Response() GalleryImageList { + return iter.page.Response() +} + +// Value returns the current value or a zero-initialized value if the +// iterator has advanced beyond the end of the collection. +func (iter GalleryImageListIterator) Value() GalleryImage { + if !iter.page.NotDone() { + return GalleryImage{} + } + return iter.page.Values()[iter.i] +} + +// IsEmpty returns true if the ListResult contains no values. +func (gil GalleryImageList) IsEmpty() bool { + return gil.Value == nil || len(*gil.Value) == 0 +} + +// galleryImageListPreparer prepares a request to retrieve the next set of results. +// It returns nil if no more results exist. 
+func (gil GalleryImageList) galleryImageListPreparer() (*http.Request, error) { + if gil.NextLink == nil || len(to.String(gil.NextLink)) < 1 { + return nil, nil + } + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(gil.NextLink))) +} + +// GalleryImageListPage contains a page of GalleryImage values. +type GalleryImageListPage struct { + fn func(GalleryImageList) (GalleryImageList, error) + gil GalleryImageList +} + +// Next advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +func (page *GalleryImageListPage) Next() error { + next, err := page.fn(page.gil) + if err != nil { + return err + } + page.gil = next + return nil +} + +// NotDone returns true if the page enumeration should be started or is not yet complete. +func (page GalleryImageListPage) NotDone() bool { + return !page.gil.IsEmpty() +} + +// Response returns the raw server response from the last page request. +func (page GalleryImageListPage) Response() GalleryImageList { + return page.gil +} + +// Values returns the slice of values for the current page or nil if there are no values. +func (page GalleryImageListPage) Values() []GalleryImage { + if page.gil.IsEmpty() { + return nil + } + return *page.gil.Value } -// DiskSku the disks sku name. Can be Standard_LRS, Premium_LRS, or StandardSSD_LRS. -type DiskSku struct { - // Name - The sku name. Possible values include: 'StorageAccountTypesStandardLRS', 'StorageAccountTypesPremiumLRS', 'StorageAccountTypesStandardSSDLRS' - Name StorageAccountTypes `json:"name,omitempty"` - // Tier - The sku tier. - Tier *string `json:"tier,omitempty"` +// GalleryImageProperties describes the properties of a gallery Image Definition. +type GalleryImageProperties struct { + // Description - The description of this gallery Image Definition resource. This property is updatable. 
+ Description *string `json:"description,omitempty"` + // Eula - The Eula agreement for the gallery Image Definition. + Eula *string `json:"eula,omitempty"` + // PrivacyStatementURI - The privacy statement uri. + PrivacyStatementURI *string `json:"privacyStatementUri,omitempty"` + // ReleaseNoteURI - The release note uri. + ReleaseNoteURI *string `json:"releaseNoteUri,omitempty"` + // OsType - This property allows you to specify the type of the OS that is included in the disk when creating a VM from a managed image.

Possible values are:

**Windows**

**Linux**. Possible values include: 'Windows', 'Linux' + OsType OperatingSystemTypes `json:"osType,omitempty"` + // OsState - The allowed values for OS State are 'Generalized'. Possible values include: 'Generalized', 'Specialized' + OsState OperatingSystemStateTypes `json:"osState,omitempty"` + // EndOfLifeDate - The end of life date of the gallery Image Definition. This property can be used for decommissioning purposes. This property is updatable. + EndOfLifeDate *date.Time `json:"endOfLifeDate,omitempty"` + Identifier *GalleryImageIdentifier `json:"identifier,omitempty"` + Recommended *RecommendedMachineConfiguration `json:"recommended,omitempty"` + Disallowed *Disallowed `json:"disallowed,omitempty"` + PurchasePlan *ImagePurchasePlan `json:"purchasePlan,omitempty"` + // ProvisioningState - The provisioning state, which only appears in the response. Possible values include: 'ProvisioningState1Creating', 'ProvisioningState1Updating', 'ProvisioningState1Failed', 'ProvisioningState1Succeeded', 'ProvisioningState1Deleting', 'ProvisioningState1Migrating' + ProvisioningState ProvisioningState1 `json:"provisioningState,omitempty"` } -// DisksRevokeAccessFuture an abstraction for monitoring and retrieving the results of a long-running operation. -type DisksRevokeAccessFuture struct { +// GalleryImagesCreateOrUpdateFuture an abstraction for monitoring and retrieving the results of a long-running +// operation. +type GalleryImagesCreateOrUpdateFuture struct { azure.Future } // Result returns the result of the asynchronous operation. // If the operation has not completed it will return an error. 
-func (future *DisksRevokeAccessFuture) Result(client DisksClient) (ar autorest.Response, err error) { +func (future *GalleryImagesCreateOrUpdateFuture) Result(client GalleryImagesClient) (gi GalleryImage, err error) { var done bool done, err = future.Done(client) if err != nil { - err = autorest.NewErrorWithError(err, "compute.DisksRevokeAccessFuture", "Result", future.Response(), "Polling failure") + err = autorest.NewErrorWithError(err, "compute.GalleryImagesCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") return } if !done { - err = azure.NewAsyncOpIncompleteError("compute.DisksRevokeAccessFuture") + err = azure.NewAsyncOpIncompleteError("compute.GalleryImagesCreateOrUpdateFuture") return } - ar.Response = future.Response() + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if gi.Response.Response, err = future.GetResult(sender); err == nil && gi.Response.Response.StatusCode != http.StatusNoContent { + gi, err = client.CreateOrUpdateResponder(gi.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.GalleryImagesCreateOrUpdateFuture", "Result", gi.Response.Response, "Failure responding to request") + } + } return } -// DisksUpdateFuture an abstraction for monitoring and retrieving the results of a long-running operation. -type DisksUpdateFuture struct { +// GalleryImagesDeleteFuture an abstraction for monitoring and retrieving the results of a long-running operation. +type GalleryImagesDeleteFuture struct { azure.Future } // Result returns the result of the asynchronous operation. // If the operation has not completed it will return an error. 
-func (future *DisksUpdateFuture) Result(client DisksClient) (d Disk, err error) { +func (future *GalleryImagesDeleteFuture) Result(client GalleryImagesClient) (ar autorest.Response, err error) { var done bool done, err = future.Done(client) if err != nil { - err = autorest.NewErrorWithError(err, "compute.DisksUpdateFuture", "Result", future.Response(), "Polling failure") + err = autorest.NewErrorWithError(err, "compute.GalleryImagesDeleteFuture", "Result", future.Response(), "Polling failure") return } if !done { - err = azure.NewAsyncOpIncompleteError("compute.DisksUpdateFuture") + err = azure.NewAsyncOpIncompleteError("compute.GalleryImagesDeleteFuture") return } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - if d.Response.Response, err = future.GetResult(sender); err == nil && d.Response.Response.StatusCode != http.StatusNoContent { - d, err = client.UpdateResponder(d.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "compute.DisksUpdateFuture", "Result", d.Response.Response, "Failure responding to request") - } - } + ar.Response = future.Response() return } -// DiskUpdate disk update resource. -type DiskUpdate struct { - *DiskUpdateProperties `json:"properties,omitempty"` +// GalleryImageVersion specifies information about the gallery Image Version that you want to create or update. +type GalleryImageVersion struct { + autorest.Response `json:"-"` + *GalleryImageVersionProperties `json:"properties,omitempty"` + // ID - Resource Id + ID *string `json:"id,omitempty"` + // Name - Resource name + Name *string `json:"name,omitempty"` + // Type - Resource type + Type *string `json:"type,omitempty"` + // Location - Resource location + Location *string `json:"location,omitempty"` // Tags - Resource tags Tags map[string]*string `json:"tags"` - Sku *DiskSku `json:"sku,omitempty"` } -// MarshalJSON is the custom marshaler for DiskUpdate. 
-func (du DiskUpdate) MarshalJSON() ([]byte, error) { +// MarshalJSON is the custom marshaler for GalleryImageVersion. +func (giv GalleryImageVersion) MarshalJSON() ([]byte, error) { objectMap := make(map[string]interface{}) - if du.DiskUpdateProperties != nil { - objectMap["properties"] = du.DiskUpdateProperties + if giv.GalleryImageVersionProperties != nil { + objectMap["properties"] = giv.GalleryImageVersionProperties } - if du.Tags != nil { - objectMap["tags"] = du.Tags + if giv.ID != nil { + objectMap["id"] = giv.ID } - if du.Sku != nil { - objectMap["sku"] = du.Sku + if giv.Name != nil { + objectMap["name"] = giv.Name + } + if giv.Type != nil { + objectMap["type"] = giv.Type + } + if giv.Location != nil { + objectMap["location"] = giv.Location + } + if giv.Tags != nil { + objectMap["tags"] = giv.Tags } return json.Marshal(objectMap) } -// UnmarshalJSON is the custom unmarshaler for DiskUpdate struct. -func (du *DiskUpdate) UnmarshalJSON(body []byte) error { +// UnmarshalJSON is the custom unmarshaler for GalleryImageVersion struct. 
+func (giv *GalleryImageVersion) UnmarshalJSON(body []byte) error { var m map[string]*json.RawMessage err := json.Unmarshal(body, &m) if err != nil { @@ -1766,30 +3119,57 @@ func (du *DiskUpdate) UnmarshalJSON(body []byte) error { switch k { case "properties": if v != nil { - var diskUpdateProperties DiskUpdateProperties - err = json.Unmarshal(*v, &diskUpdateProperties) + var galleryImageVersionProperties GalleryImageVersionProperties + err = json.Unmarshal(*v, &galleryImageVersionProperties) if err != nil { return err } - du.DiskUpdateProperties = &diskUpdateProperties + giv.GalleryImageVersionProperties = &galleryImageVersionProperties } - case "tags": + case "id": if v != nil { - var tags map[string]*string - err = json.Unmarshal(*v, &tags) + var ID string + err = json.Unmarshal(*v, &ID) if err != nil { return err } - du.Tags = tags + giv.ID = &ID } - case "sku": + case "name": if v != nil { - var sku DiskSku - err = json.Unmarshal(*v, &sku) + var name string + err = json.Unmarshal(*v, &name) if err != nil { return err } - du.Sku = &sku + giv.Name = &name + } + case "type": + if v != nil { + var typeVar string + err = json.Unmarshal(*v, &typeVar) + if err != nil { + return err + } + giv.Type = &typeVar + } + case "location": + if v != nil { + var location string + err = json.Unmarshal(*v, &location) + if err != nil { + return err + } + giv.Location = &location + } + case "tags": + if v != nil { + var tags map[string]*string + err = json.Unmarshal(*v, &tags) + if err != nil { + return err + } + giv.Tags = tags } } } @@ -1797,29 +3177,315 @@ func (du *DiskUpdate) UnmarshalJSON(body []byte) error { return nil } -// DiskUpdateProperties disk resource update properties. -type DiskUpdateProperties struct { - // OsType - the Operating System type. 
Possible values include: 'Windows', 'Linux' - OsType OperatingSystemTypes `json:"osType,omitempty"` - // DiskSizeGB - If creationData.createOption is Empty, this field is mandatory and it indicates the size of the VHD to create. If this field is present for updates or creation with other options, it indicates a resize. Resizes are only allowed if the disk is not attached to a running VM, and can only increase the disk's size. - DiskSizeGB *int32 `json:"diskSizeGB,omitempty"` - // EncryptionSettings - Encryption settings for disk or snapshot - EncryptionSettings *EncryptionSettings `json:"encryptionSettings,omitempty"` +// GalleryImageVersionList the List Gallery Image version operation response. +type GalleryImageVersionList struct { + autorest.Response `json:"-"` + // Value - A list of gallery Image Versions. + Value *[]GalleryImageVersion `json:"value,omitempty"` + // NextLink - The uri to fetch the next page of gallery Image Versions. Call ListNext() with this to fetch the next page of gallery Image Versions. + NextLink *string `json:"nextLink,omitempty"` +} + +// GalleryImageVersionListIterator provides access to a complete listing of GalleryImageVersion values. +type GalleryImageVersionListIterator struct { + i int + page GalleryImageVersionListPage +} + +// Next advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +func (iter *GalleryImageVersionListIterator) Next() error { + iter.i++ + if iter.i < len(iter.page.Values()) { + return nil + } + err := iter.page.Next() + if err != nil { + iter.i-- + return err + } + iter.i = 0 + return nil +} + +// NotDone returns true if the enumeration should be started or is not yet complete. +func (iter GalleryImageVersionListIterator) NotDone() bool { + return iter.page.NotDone() && iter.i < len(iter.page.Values()) +} + +// Response returns the raw server response from the last page request. 
+func (iter GalleryImageVersionListIterator) Response() GalleryImageVersionList { + return iter.page.Response() +} + +// Value returns the current value or a zero-initialized value if the +// iterator has advanced beyond the end of the collection. +func (iter GalleryImageVersionListIterator) Value() GalleryImageVersion { + if !iter.page.NotDone() { + return GalleryImageVersion{} + } + return iter.page.Values()[iter.i] +} + +// IsEmpty returns true if the ListResult contains no values. +func (givl GalleryImageVersionList) IsEmpty() bool { + return givl.Value == nil || len(*givl.Value) == 0 +} + +// galleryImageVersionListPreparer prepares a request to retrieve the next set of results. +// It returns nil if no more results exist. +func (givl GalleryImageVersionList) galleryImageVersionListPreparer() (*http.Request, error) { + if givl.NextLink == nil || len(to.String(givl.NextLink)) < 1 { + return nil, nil + } + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(givl.NextLink))) +} + +// GalleryImageVersionListPage contains a page of GalleryImageVersion values. +type GalleryImageVersionListPage struct { + fn func(GalleryImageVersionList) (GalleryImageVersionList, error) + givl GalleryImageVersionList +} + +// Next advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +func (page *GalleryImageVersionListPage) Next() error { + next, err := page.fn(page.givl) + if err != nil { + return err + } + page.givl = next + return nil +} + +// NotDone returns true if the page enumeration should be started or is not yet complete. +func (page GalleryImageVersionListPage) NotDone() bool { + return !page.givl.IsEmpty() +} + +// Response returns the raw server response from the last page request. 
+func (page GalleryImageVersionListPage) Response() GalleryImageVersionList { + return page.givl +} + +// Values returns the slice of values for the current page or nil if there are no values. +func (page GalleryImageVersionListPage) Values() []GalleryImageVersion { + if page.givl.IsEmpty() { + return nil + } + return *page.givl.Value +} + +// GalleryImageVersionProperties describes the properties of a gallery Image Version. +type GalleryImageVersionProperties struct { + PublishingProfile *GalleryImageVersionPublishingProfile `json:"publishingProfile,omitempty"` + // ProvisioningState - The provisioning state, which only appears in the response. Possible values include: 'ProvisioningState2Creating', 'ProvisioningState2Updating', 'ProvisioningState2Failed', 'ProvisioningState2Succeeded', 'ProvisioningState2Deleting', 'ProvisioningState2Migrating' + ProvisioningState ProvisioningState2 `json:"provisioningState,omitempty"` + StorageProfile *GalleryImageVersionStorageProfile `json:"storageProfile,omitempty"` + ReplicationStatus *ReplicationStatus `json:"replicationStatus,omitempty"` +} + +// GalleryImageVersionPublishingProfile the publishing profile of a gallery Image Version. +type GalleryImageVersionPublishingProfile struct { + // ReplicaCount - The number of replicas of the Image Version to be created per region. This property would take effect for a region when regionalReplicaCount is not specified. This property is updatable. + ReplicaCount *int32 `json:"replicaCount,omitempty"` + // ExcludeFromLatest - If set to true, Virtual Machines deployed from the latest version of the Image Definition won't use this Image Version. + ExcludeFromLatest *bool `json:"excludeFromLatest,omitempty"` + // PublishedDate - The timestamp for when the gallery Image Version is published. + PublishedDate *date.Time `json:"publishedDate,omitempty"` + // EndOfLifeDate - The end of life date of the gallery Image Version. This property can be used for decommissioning purposes. 
This property is updatable. + EndOfLifeDate *date.Time `json:"endOfLifeDate,omitempty"` + // StorageAccountType - Specifies the storage account type to be used to store the image. This property is not updatable. Possible values include: 'StorageAccountTypeStandardLRS', 'StorageAccountTypeStandardZRS' + StorageAccountType StorageAccountType `json:"storageAccountType,omitempty"` + // TargetRegions - The target regions where the Image Version is going to be replicated to. This property is updatable. + TargetRegions *[]TargetRegion `json:"targetRegions,omitempty"` + Source *GalleryArtifactSource `json:"source,omitempty"` +} + +// GalleryImageVersionsCreateOrUpdateFuture an abstraction for monitoring and retrieving the results of a +// long-running operation. +type GalleryImageVersionsCreateOrUpdateFuture struct { + azure.Future +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. +func (future *GalleryImageVersionsCreateOrUpdateFuture) Result(client GalleryImageVersionsClient) (giv GalleryImageVersion, err error) { + var done bool + done, err = future.Done(client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.GalleryImageVersionsCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.GalleryImageVersionsCreateOrUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if giv.Response.Response, err = future.GetResult(sender); err == nil && giv.Response.Response.StatusCode != http.StatusNoContent { + giv, err = client.CreateOrUpdateResponder(giv.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.GalleryImageVersionsCreateOrUpdateFuture", "Result", giv.Response.Response, "Failure responding to request") + } + } + return +} + +// 
GalleryImageVersionsDeleteFuture an abstraction for monitoring and retrieving the results of a long-running +// operation. +type GalleryImageVersionsDeleteFuture struct { + azure.Future +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. +func (future *GalleryImageVersionsDeleteFuture) Result(client GalleryImageVersionsClient) (ar autorest.Response, err error) { + var done bool + done, err = future.Done(client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.GalleryImageVersionsDeleteFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.GalleryImageVersionsDeleteFuture") + return + } + ar.Response = future.Response() + return +} + +// GalleryImageVersionStorageProfile this is the storage profile of a gallery Image Version. +type GalleryImageVersionStorageProfile struct { + OsDiskImage *GalleryOSDiskImage `json:"osDiskImage,omitempty"` + // DataDiskImages - A list of data disk images. + DataDiskImages *[]GalleryDataDiskImage `json:"dataDiskImages,omitempty"` +} + +// GalleryList the List Galleries operation response. +type GalleryList struct { + autorest.Response `json:"-"` + // Value - A list of galleries. + Value *[]Gallery `json:"value,omitempty"` + // NextLink - The uri to fetch the next page of galleries. Call ListNext() with this to fetch the next page of galleries. + NextLink *string `json:"nextLink,omitempty"` +} + +// GalleryListIterator provides access to a complete listing of Gallery values. +type GalleryListIterator struct { + i int + page GalleryListPage +} + +// Next advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. 
+func (iter *GalleryListIterator) Next() error { + iter.i++ + if iter.i < len(iter.page.Values()) { + return nil + } + err := iter.page.Next() + if err != nil { + iter.i-- + return err + } + iter.i = 0 + return nil +} + +// NotDone returns true if the enumeration should be started or is not yet complete. +func (iter GalleryListIterator) NotDone() bool { + return iter.page.NotDone() && iter.i < len(iter.page.Values()) +} + +// Response returns the raw server response from the last page request. +func (iter GalleryListIterator) Response() GalleryList { + return iter.page.Response() +} + +// Value returns the current value or a zero-initialized value if the +// iterator has advanced beyond the end of the collection. +func (iter GalleryListIterator) Value() Gallery { + if !iter.page.NotDone() { + return Gallery{} + } + return iter.page.Values()[iter.i] +} + +// IsEmpty returns true if the ListResult contains no values. +func (gl GalleryList) IsEmpty() bool { + return gl.Value == nil || len(*gl.Value) == 0 +} + +// galleryListPreparer prepares a request to retrieve the next set of results. +// It returns nil if no more results exist. +func (gl GalleryList) galleryListPreparer() (*http.Request, error) { + if gl.NextLink == nil || len(to.String(gl.NextLink)) < 1 { + return nil, nil + } + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(gl.NextLink))) +} + +// GalleryListPage contains a page of Gallery values. +type GalleryListPage struct { + fn func(GalleryList) (GalleryList, error) + gl GalleryList +} + +// Next advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +func (page *GalleryListPage) Next() error { + next, err := page.fn(page.gl) + if err != nil { + return err + } + page.gl = next + return nil +} + +// NotDone returns true if the page enumeration should be started or is not yet complete. 
+func (page GalleryListPage) NotDone() bool { + return !page.gl.IsEmpty() } -// EncryptionSettings encryption settings for disk or snapshot -type EncryptionSettings struct { - // Enabled - Set this flag to true and provide DiskEncryptionKey and optional KeyEncryptionKey to enable encryption. Set this flag to false and remove DiskEncryptionKey and KeyEncryptionKey to disable encryption. If EncryptionSettings is null in the request object, the existing settings remain unchanged. - Enabled *bool `json:"enabled,omitempty"` - // DiskEncryptionKey - Key Vault Secret Url and vault id of the disk encryption key - DiskEncryptionKey *KeyVaultAndSecretReference `json:"diskEncryptionKey,omitempty"` - // KeyEncryptionKey - Key Vault Key Url and vault id of the key encryption key - KeyEncryptionKey *KeyVaultAndKeyReference `json:"keyEncryptionKey,omitempty"` +// Response returns the raw server response from the last page request. +func (page GalleryListPage) Response() GalleryList { + return page.gl +} + +// Values returns the slice of values for the current page or nil if there are no values. +func (page GalleryListPage) Values() []Gallery { + if page.gl.IsEmpty() { + return nil + } + return *page.gl.Value +} + +// GalleryOSDiskImage this is the OS disk image. +type GalleryOSDiskImage struct { + // SizeInGB - This property indicates the size of the VHD to be created. + SizeInGB *int32 `json:"sizeInGB,omitempty"` + // HostCaching - The host caching of the disk. Valid values are 'None', 'ReadOnly', and 'ReadWrite'. Possible values include: 'HostCachingNone', 'HostCachingReadOnly', 'HostCachingReadWrite' + HostCaching HostCaching `json:"hostCaching,omitempty"` +} + +// GalleryProperties describes the properties of a Shared Image Gallery. +type GalleryProperties struct { + // Description - The description of this Shared Image Gallery resource. This property is updatable. 
+ Description *string `json:"description,omitempty"` + Identifier *GalleryIdentifier `json:"identifier,omitempty"` + // ProvisioningState - The provisioning state, which only appears in the response. Possible values include: 'ProvisioningStateCreating', 'ProvisioningStateUpdating', 'ProvisioningStateFailed', 'ProvisioningStateSucceeded', 'ProvisioningStateDeleting', 'ProvisioningStateMigrating' + ProvisioningState ProvisioningState `json:"provisioningState,omitempty"` } // GrantAccessData data used for requesting a SAS. type GrantAccessData struct { - // Access - Possible values include: 'None', 'Read' + // Access - Possible values include: 'None', 'Read', 'Write' Access AccessLevel `json:"access,omitempty"` // DurationInSeconds - Time duration in seconds until the SAS access expires. DurationInSeconds *int32 `json:"durationInSeconds,omitempty"` @@ -1827,7 +3493,7 @@ type GrantAccessData struct { // HardwareProfile specifies the hardware settings for the virtual machine. type HardwareProfile struct { - // VMSize - Specifies the size of the virtual machine. For more information about virtual machine sizes, see [Sizes for virtual machines](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-windows-sizes?toc=%2fazure%2fvirtual-machines%2fwindows%2ftoc.json).

The available VM sizes depend on region and availability set. For a list of available sizes use these APIs:

[List all available virtual machine sizes in an availability set](https://docs.microsoft.com/rest/api/compute/availabilitysets/listavailablesizes)

[List all available virtual machine sizes in a region](https://docs.microsoft.com/rest/api/compute/virtualmachinesizes/list)

[List all available virtual machine sizes for resizing](https://docs.microsoft.com/rest/api/compute/virtualmachines/listavailablesizes). Possible values include: 'BasicA0', 'BasicA1', 'BasicA2', 'BasicA3', 'BasicA4', 'StandardA0', 'StandardA1', 'StandardA2', 'StandardA3', 'StandardA4', 'StandardA5', 'StandardA6', 'StandardA7', 'StandardA8', 'StandardA9', 'StandardA10', 'StandardA11', 'StandardA1V2', 'StandardA2V2', 'StandardA4V2', 'StandardA8V2', 'StandardA2mV2', 'StandardA4mV2', 'StandardA8mV2', 'StandardB1s', 'StandardB1ms', 'StandardB2s', 'StandardB2ms', 'StandardB4ms', 'StandardB8ms', 'StandardD1', 'StandardD2', 'StandardD3', 'StandardD4', 'StandardD11', 'StandardD12', 'StandardD13', 'StandardD14', 'StandardD1V2', 'StandardD2V2', 'StandardD3V2', 'StandardD4V2', 'StandardD5V2', 'StandardD2V3', 'StandardD4V3', 'StandardD8V3', 'StandardD16V3', 'StandardD32V3', 'StandardD64V3', 'StandardD2sV3', 'StandardD4sV3', 'StandardD8sV3', 'StandardD16sV3', 'StandardD32sV3', 'StandardD64sV3', 'StandardD11V2', 'StandardD12V2', 'StandardD13V2', 'StandardD14V2', 'StandardD15V2', 'StandardDS1', 'StandardDS2', 'StandardDS3', 'StandardDS4', 'StandardDS11', 'StandardDS12', 'StandardDS13', 'StandardDS14', 'StandardDS1V2', 'StandardDS2V2', 'StandardDS3V2', 'StandardDS4V2', 'StandardDS5V2', 'StandardDS11V2', 'StandardDS12V2', 'StandardDS13V2', 'StandardDS14V2', 'StandardDS15V2', 'StandardDS134V2', 'StandardDS132V2', 'StandardDS148V2', 'StandardDS144V2', 'StandardE2V3', 'StandardE4V3', 'StandardE8V3', 'StandardE16V3', 'StandardE32V3', 'StandardE64V3', 'StandardE2sV3', 'StandardE4sV3', 'StandardE8sV3', 'StandardE16sV3', 'StandardE32sV3', 'StandardE64sV3', 'StandardE3216V3', 'StandardE328sV3', 'StandardE6432sV3', 'StandardE6416sV3', 'StandardF1', 'StandardF2', 'StandardF4', 'StandardF8', 'StandardF16', 'StandardF1s', 'StandardF2s', 'StandardF4s', 'StandardF8s', 'StandardF16s', 'StandardF2sV2', 'StandardF4sV2', 'StandardF8sV2', 'StandardF16sV2', 'StandardF32sV2', 'StandardF64sV2', 
'StandardF72sV2', 'StandardG1', 'StandardG2', 'StandardG3', 'StandardG4', 'StandardG5', 'StandardGS1', 'StandardGS2', 'StandardGS3', 'StandardGS4', 'StandardGS5', 'StandardGS48', 'StandardGS44', 'StandardGS516', 'StandardGS58', 'StandardH8', 'StandardH16', 'StandardH8m', 'StandardH16m', 'StandardH16r', 'StandardH16mr', 'StandardL4s', 'StandardL8s', 'StandardL16s', 'StandardL32s', 'StandardM64s', 'StandardM64ms', 'StandardM128s', 'StandardM128ms', 'StandardM6432ms', 'StandardM6416ms', 'StandardM12864ms', 'StandardM12832ms', 'StandardNC6', 'StandardNC12', 'StandardNC24', 'StandardNC24r', 'StandardNC6sV2', 'StandardNC12sV2', 'StandardNC24sV2', 'StandardNC24rsV2', 'StandardNC6sV3', 'StandardNC12sV3', 'StandardNC24sV3', 'StandardNC24rsV3', 'StandardND6s', 'StandardND12s', 'StandardND24s', 'StandardND24rs', 'StandardNV6', 'StandardNV12', 'StandardNV24' + // VMSize - Specifies the size of the virtual machine. For more information about virtual machine sizes, see [Sizes for virtual machines](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-windows-sizes?toc=%2fazure%2fvirtual-machines%2fwindows%2ftoc.json).

The available VM sizes depend on region and availability set. For a list of available sizes use these APIs:

[List all available virtual machine sizes in an availability set](https://docs.microsoft.com/rest/api/compute/availabilitysets/listavailablesizes)

[List all available virtual machine sizes in a region](https://docs.microsoft.com/rest/api/compute/virtualmachinesizes/list)

[List all available virtual machine sizes for resizing](https://docs.microsoft.com/rest/api/compute/virtualmachines/listavailablesizes). Possible values include: 'VirtualMachineSizeTypesBasicA0', 'VirtualMachineSizeTypesBasicA1', 'VirtualMachineSizeTypesBasicA2', 'VirtualMachineSizeTypesBasicA3', 'VirtualMachineSizeTypesBasicA4', 'VirtualMachineSizeTypesStandardA0', 'VirtualMachineSizeTypesStandardA1', 'VirtualMachineSizeTypesStandardA2', 'VirtualMachineSizeTypesStandardA3', 'VirtualMachineSizeTypesStandardA4', 'VirtualMachineSizeTypesStandardA5', 'VirtualMachineSizeTypesStandardA6', 'VirtualMachineSizeTypesStandardA7', 'VirtualMachineSizeTypesStandardA8', 'VirtualMachineSizeTypesStandardA9', 'VirtualMachineSizeTypesStandardA10', 'VirtualMachineSizeTypesStandardA11', 'VirtualMachineSizeTypesStandardA1V2', 'VirtualMachineSizeTypesStandardA2V2', 'VirtualMachineSizeTypesStandardA4V2', 'VirtualMachineSizeTypesStandardA8V2', 'VirtualMachineSizeTypesStandardA2mV2', 'VirtualMachineSizeTypesStandardA4mV2', 'VirtualMachineSizeTypesStandardA8mV2', 'VirtualMachineSizeTypesStandardB1s', 'VirtualMachineSizeTypesStandardB1ms', 'VirtualMachineSizeTypesStandardB2s', 'VirtualMachineSizeTypesStandardB2ms', 'VirtualMachineSizeTypesStandardB4ms', 'VirtualMachineSizeTypesStandardB8ms', 'VirtualMachineSizeTypesStandardD1', 'VirtualMachineSizeTypesStandardD2', 'VirtualMachineSizeTypesStandardD3', 'VirtualMachineSizeTypesStandardD4', 'VirtualMachineSizeTypesStandardD11', 'VirtualMachineSizeTypesStandardD12', 'VirtualMachineSizeTypesStandardD13', 'VirtualMachineSizeTypesStandardD14', 'VirtualMachineSizeTypesStandardD1V2', 'VirtualMachineSizeTypesStandardD2V2', 'VirtualMachineSizeTypesStandardD3V2', 'VirtualMachineSizeTypesStandardD4V2', 'VirtualMachineSizeTypesStandardD5V2', 'VirtualMachineSizeTypesStandardD2V3', 'VirtualMachineSizeTypesStandardD4V3', 'VirtualMachineSizeTypesStandardD8V3', 'VirtualMachineSizeTypesStandardD16V3', 'VirtualMachineSizeTypesStandardD32V3', 
'VirtualMachineSizeTypesStandardD64V3', 'VirtualMachineSizeTypesStandardD2sV3', 'VirtualMachineSizeTypesStandardD4sV3', 'VirtualMachineSizeTypesStandardD8sV3', 'VirtualMachineSizeTypesStandardD16sV3', 'VirtualMachineSizeTypesStandardD32sV3', 'VirtualMachineSizeTypesStandardD64sV3', 'VirtualMachineSizeTypesStandardD11V2', 'VirtualMachineSizeTypesStandardD12V2', 'VirtualMachineSizeTypesStandardD13V2', 'VirtualMachineSizeTypesStandardD14V2', 'VirtualMachineSizeTypesStandardD15V2', 'VirtualMachineSizeTypesStandardDS1', 'VirtualMachineSizeTypesStandardDS2', 'VirtualMachineSizeTypesStandardDS3', 'VirtualMachineSizeTypesStandardDS4', 'VirtualMachineSizeTypesStandardDS11', 'VirtualMachineSizeTypesStandardDS12', 'VirtualMachineSizeTypesStandardDS13', 'VirtualMachineSizeTypesStandardDS14', 'VirtualMachineSizeTypesStandardDS1V2', 'VirtualMachineSizeTypesStandardDS2V2', 'VirtualMachineSizeTypesStandardDS3V2', 'VirtualMachineSizeTypesStandardDS4V2', 'VirtualMachineSizeTypesStandardDS5V2', 'VirtualMachineSizeTypesStandardDS11V2', 'VirtualMachineSizeTypesStandardDS12V2', 'VirtualMachineSizeTypesStandardDS13V2', 'VirtualMachineSizeTypesStandardDS14V2', 'VirtualMachineSizeTypesStandardDS15V2', 'VirtualMachineSizeTypesStandardDS134V2', 'VirtualMachineSizeTypesStandardDS132V2', 'VirtualMachineSizeTypesStandardDS148V2', 'VirtualMachineSizeTypesStandardDS144V2', 'VirtualMachineSizeTypesStandardE2V3', 'VirtualMachineSizeTypesStandardE4V3', 'VirtualMachineSizeTypesStandardE8V3', 'VirtualMachineSizeTypesStandardE16V3', 'VirtualMachineSizeTypesStandardE32V3', 'VirtualMachineSizeTypesStandardE64V3', 'VirtualMachineSizeTypesStandardE2sV3', 'VirtualMachineSizeTypesStandardE4sV3', 'VirtualMachineSizeTypesStandardE8sV3', 'VirtualMachineSizeTypesStandardE16sV3', 'VirtualMachineSizeTypesStandardE32sV3', 'VirtualMachineSizeTypesStandardE64sV3', 'VirtualMachineSizeTypesStandardE3216V3', 'VirtualMachineSizeTypesStandardE328sV3', 'VirtualMachineSizeTypesStandardE6432sV3', 
'VirtualMachineSizeTypesStandardE6416sV3', 'VirtualMachineSizeTypesStandardF1', 'VirtualMachineSizeTypesStandardF2', 'VirtualMachineSizeTypesStandardF4', 'VirtualMachineSizeTypesStandardF8', 'VirtualMachineSizeTypesStandardF16', 'VirtualMachineSizeTypesStandardF1s', 'VirtualMachineSizeTypesStandardF2s', 'VirtualMachineSizeTypesStandardF4s', 'VirtualMachineSizeTypesStandardF8s', 'VirtualMachineSizeTypesStandardF16s', 'VirtualMachineSizeTypesStandardF2sV2', 'VirtualMachineSizeTypesStandardF4sV2', 'VirtualMachineSizeTypesStandardF8sV2', 'VirtualMachineSizeTypesStandardF16sV2', 'VirtualMachineSizeTypesStandardF32sV2', 'VirtualMachineSizeTypesStandardF64sV2', 'VirtualMachineSizeTypesStandardF72sV2', 'VirtualMachineSizeTypesStandardG1', 'VirtualMachineSizeTypesStandardG2', 'VirtualMachineSizeTypesStandardG3', 'VirtualMachineSizeTypesStandardG4', 'VirtualMachineSizeTypesStandardG5', 'VirtualMachineSizeTypesStandardGS1', 'VirtualMachineSizeTypesStandardGS2', 'VirtualMachineSizeTypesStandardGS3', 'VirtualMachineSizeTypesStandardGS4', 'VirtualMachineSizeTypesStandardGS5', 'VirtualMachineSizeTypesStandardGS48', 'VirtualMachineSizeTypesStandardGS44', 'VirtualMachineSizeTypesStandardGS516', 'VirtualMachineSizeTypesStandardGS58', 'VirtualMachineSizeTypesStandardH8', 'VirtualMachineSizeTypesStandardH16', 'VirtualMachineSizeTypesStandardH8m', 'VirtualMachineSizeTypesStandardH16m', 'VirtualMachineSizeTypesStandardH16r', 'VirtualMachineSizeTypesStandardH16mr', 'VirtualMachineSizeTypesStandardL4s', 'VirtualMachineSizeTypesStandardL8s', 'VirtualMachineSizeTypesStandardL16s', 'VirtualMachineSizeTypesStandardL32s', 'VirtualMachineSizeTypesStandardM64s', 'VirtualMachineSizeTypesStandardM64ms', 'VirtualMachineSizeTypesStandardM128s', 'VirtualMachineSizeTypesStandardM128ms', 'VirtualMachineSizeTypesStandardM6432ms', 'VirtualMachineSizeTypesStandardM6416ms', 'VirtualMachineSizeTypesStandardM12864ms', 'VirtualMachineSizeTypesStandardM12832ms', 'VirtualMachineSizeTypesStandardNC6', 
'VirtualMachineSizeTypesStandardNC12', 'VirtualMachineSizeTypesStandardNC24', 'VirtualMachineSizeTypesStandardNC24r', 'VirtualMachineSizeTypesStandardNC6sV2', 'VirtualMachineSizeTypesStandardNC12sV2', 'VirtualMachineSizeTypesStandardNC24sV2', 'VirtualMachineSizeTypesStandardNC24rsV2', 'VirtualMachineSizeTypesStandardNC6sV3', 'VirtualMachineSizeTypesStandardNC12sV3', 'VirtualMachineSizeTypesStandardNC24sV3', 'VirtualMachineSizeTypesStandardNC24rsV3', 'VirtualMachineSizeTypesStandardND6s', 'VirtualMachineSizeTypesStandardND12s', 'VirtualMachineSizeTypesStandardND24s', 'VirtualMachineSizeTypesStandardND24rs', 'VirtualMachineSizeTypesStandardNV6', 'VirtualMachineSizeTypesStandardNV12', 'VirtualMachineSizeTypesStandardNV24' VMSize VirtualMachineSizeTypes `json:"vmSize,omitempty"` } @@ -1955,13 +3621,13 @@ type ImageDataDisk struct { Caching CachingTypes `json:"caching,omitempty"` // DiskSizeGB - Specifies the size of empty data disks in gigabytes. This element can be used to overwrite the name of the disk in a virtual machine image.

This value cannot be larger than 1023 GB DiskSizeGB *int32 `json:"diskSizeGB,omitempty"` - // StorageAccountType - Specifies the storage account type for the managed disk. Possible values are: Standard_LRS, Premium_LRS, and StandardSSD_LRS. Possible values include: 'StorageAccountTypesStandardLRS', 'StorageAccountTypesPremiumLRS', 'StorageAccountTypesStandardSSDLRS' + // StorageAccountType - Specifies the storage account type for the managed disk. NOTE: UltraSSD_LRS can only be used with data disks, it cannot be used with OS Disk. Possible values include: 'StorageAccountTypesStandardLRS', 'StorageAccountTypesPremiumLRS', 'StorageAccountTypesStandardSSDLRS', 'StorageAccountTypesUltraSSDLRS' StorageAccountType StorageAccountTypes `json:"storageAccountType,omitempty"` } // ImageDiskReference the source image used for creating the disk. type ImageDiskReference struct { - // ID - A relative uri containing either a Platform Imgage Repository or user image reference. + // ID - A relative uri containing either a Platform Image Repository or user image reference. ID *string `json:"id,omitempty"` // Lun - If the disk is created from an image's data disk, this is an index that indicates which of the data disks in the image to use. For OS disks, this field is null. Lun *int32 `json:"lun,omitempty"` @@ -2085,7 +3751,7 @@ type ImageOSDisk struct { Caching CachingTypes `json:"caching,omitempty"` // DiskSizeGB - Specifies the size of empty data disks in gigabytes. This element can be used to overwrite the name of the disk in a virtual machine image.

This value cannot be larger than 1023 GB DiskSizeGB *int32 `json:"diskSizeGB,omitempty"` - // StorageAccountType - Specifies the storage account type for the managed disk. Possible values are: Standard_LRS, Premium_LRS, and StandardSSD_LRS. Possible values include: 'StorageAccountTypesStandardLRS', 'StorageAccountTypesPremiumLRS', 'StorageAccountTypesStandardSSDLRS' + // StorageAccountType - Specifies the storage account type for the managed disk. UltraSSD_LRS cannot be used with OS Disk. Possible values include: 'StorageAccountTypesStandardLRS', 'StorageAccountTypesPremiumLRS', 'StorageAccountTypesStandardSSDLRS', 'StorageAccountTypesUltraSSDLRS' StorageAccountType StorageAccountTypes `json:"storageAccountType,omitempty"` } @@ -2099,6 +3765,16 @@ type ImageProperties struct { ProvisioningState *string `json:"provisioningState,omitempty"` } +// ImagePurchasePlan describes the gallery Image Definition purchase plan. This is used by marketplace images. +type ImagePurchasePlan struct { + // Name - The plan ID. + Name *string `json:"name,omitempty"` + // Publisher - The publisher ID. + Publisher *string `json:"publisher,omitempty"` + // Product - The product ID. + Product *string `json:"product,omitempty"` +} + // ImageReference specifies information about the image to use. You can specify information about platform images, // marketplace images, or virtual machine images. This element is required when you want to use a platform image, // marketplace image, or virtual machine image, but is not used in other creation operations. @@ -2320,6 +3996,8 @@ type LinuxConfiguration struct { DisablePasswordAuthentication *bool `json:"disablePasswordAuthentication,omitempty"` // SSH - Specifies the ssh key configuration for a Linux OS. SSH *SSHConfiguration `json:"ssh,omitempty"` + // ProvisionVMAgent - Indicates whether virtual machine agent should be provisioned on the virtual machine.

When this property is not specified in the request body, default behavior is to set it to true. This will ensure that VM Agent is installed on the VM so that extensions can be added to the VM later. + ProvisionVMAgent *bool `json:"provisionVMAgent,omitempty"` } // ListUsagesResult the List Usages operation response. @@ -2504,7 +4182,7 @@ type LogAnalyticsInputBase struct { ToTime *date.Time `json:"toTime,omitempty"` // GroupByThrottlePolicy - Group query result by Throttle Policy applied. GroupByThrottlePolicy *bool `json:"groupByThrottlePolicy,omitempty"` - // GroupByOperationName - Group query result by by Operation Name. + // GroupByOperationName - Group query result by Operation Name. GroupByOperationName *bool `json:"groupByOperationName,omitempty"` // GroupByResourceName - Group query result by Resource Name. GroupByResourceName *bool `json:"groupByResourceName,omitempty"` @@ -2541,9 +4219,15 @@ type MaintenanceRedeployStatus struct { LastOperationMessage *string `json:"lastOperationMessage,omitempty"` } +// ManagedArtifact the managed artifact. +type ManagedArtifact struct { + // ID - The managed artifact id. + ID *string `json:"id,omitempty"` +} + // ManagedDiskParameters the parameters of a managed disk. type ManagedDiskParameters struct { - // StorageAccountType - Specifies the storage account type for the managed disk. Possible values are: Standard_LRS, Premium_LRS, and StandardSSD_LRS. Possible values include: 'StorageAccountTypesStandardLRS', 'StorageAccountTypesPremiumLRS', 'StorageAccountTypesStandardSSDLRS' + // StorageAccountType - Specifies the storage account type for the managed disk. NOTE: UltraSSD_LRS can only be used with data disks, it cannot be used with OS Disk. 
Possible values include: 'StorageAccountTypesStandardLRS', 'StorageAccountTypesPremiumLRS', 'StorageAccountTypesStandardSSDLRS', 'StorageAccountTypesUltraSSDLRS' StorageAccountType StorageAccountTypes `json:"storageAccountType,omitempty"` // ID - Resource Id ID *string `json:"id,omitempty"` @@ -2716,9 +4400,11 @@ type OSDisk struct { Caching CachingTypes `json:"caching,omitempty"` // WriteAcceleratorEnabled - Specifies whether writeAccelerator should be enabled or disabled on the disk. WriteAcceleratorEnabled *bool `json:"writeAcceleratorEnabled,omitempty"` + // DiffDiskSettings - Specifies the ephemeral Disk Settings for the operating system disk used by the virtual machine. + DiffDiskSettings *DiffDiskSettings `json:"diffDiskSettings,omitempty"` // CreateOption - Specifies how the virtual machine should be created.

Possible values are:

**Attach** \u2013 This value is used when you are using a specialized disk to create the virtual machine.

**FromImage** \u2013 This value is used when you are using an image to create the virtual machine. If you are using a platform image, you also use the imageReference element described above. If you are using a marketplace image, you also use the plan element previously described. Possible values include: 'DiskCreateOptionTypesFromImage', 'DiskCreateOptionTypesEmpty', 'DiskCreateOptionTypesAttach' CreateOption DiskCreateOptionTypes `json:"createOption,omitempty"` - // DiskSizeGB - Specifies the size of an empty data disk in gigabytes. This element can be used to overwrite the name of the disk in a virtual machine image.

This value cannot be larger than 1023 GB + // DiskSizeGB - Specifies the size of an empty data disk in gigabytes. This element can be used to overwrite the size of the disk in a virtual machine image.

This value cannot be larger than 1023 GB DiskSizeGB *int32 `json:"diskSizeGB,omitempty"` // ManagedDisk - The managed disk parameters. ManagedDisk *ManagedDiskParameters `json:"managedDisk,omitempty"` @@ -2732,7 +4418,7 @@ type OSDiskImage struct { // OSProfile specifies the operating system settings for the virtual machine. type OSProfile struct { - // ComputerName - Specifies the host OS name of the virtual machine.

**Max-length (Windows):** 15 characters

**Max-length (Linux):** 64 characters.

For naming conventions and restrictions see [Azure infrastructure services implementation guidelines](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-linux-infrastructure-subscription-accounts-guidelines?toc=%2fazure%2fvirtual-machines%2flinux%2ftoc.json#1-naming-conventions). + // ComputerName - Specifies the host OS name of the virtual machine.

This name cannot be updated after the VM is created.

**Max-length (Windows):** 15 characters

**Max-length (Linux):** 64 characters.

For naming conventions and restrictions see [Azure infrastructure services implementation guidelines](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-linux-infrastructure-subscription-accounts-guidelines?toc=%2fazure%2fvirtual-machines%2flinux%2ftoc.json#1-naming-conventions). ComputerName *string `json:"computerName,omitempty"` // AdminUsername - Specifies the name of the administrator account.

**Windows-only restriction:** Cannot end in "."

**Disallowed values:** "administrator", "admin", "user", "user1", "test", "user2", "test1", "user3", "admin1", "1", "123", "a", "actuser", "adm", "admin2", "aspnet", "backup", "console", "david", "guest", "john", "owner", "root", "server", "sql", "support", "support_388945a0", "sys", "test2", "test3", "user4", "user5".

**Minimum-length (Linux):** 1 character

**Max-length (Linux):** 64 characters

**Max-length (Windows):** 20 characters

  • For root access to the Linux VM, see [Using root privileges on Linux virtual machines in Azure](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-linux-use-root-privileges?toc=%2fazure%2fvirtual-machines%2flinux%2ftoc.json)
  • For a list of built-in system users on Linux that should not be used in this field, see [Selecting User Names for Linux on Azure](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-linux-usernames?toc=%2fazure%2fvirtual-machines%2flinux%2ftoc.json) AdminUsername *string `json:"adminUsername,omitempty"` @@ -2746,6 +4432,8 @@ type OSProfile struct { LinuxConfiguration *LinuxConfiguration `json:"linuxConfiguration,omitempty"` // Secrets - Specifies set of certificates that should be installed onto the virtual machine. Secrets *[]VaultSecretGroup `json:"secrets,omitempty"` + // AllowExtensionOperations - Specifies whether extension operations should be allowed on the virtual machine.

    This may only be set to False when no extensions are present on the virtual machine. + AllowExtensionOperations *bool `json:"allowExtensionOperations,omitempty"` } // Plan specifies information about the marketplace image used to create the virtual machine. This element is only @@ -2773,6 +4461,13 @@ type PurchasePlan struct { Product *string `json:"product,omitempty"` } +// RecommendedMachineConfiguration the properties describe the recommended machine configuration for this Image +// Definition. These properties are updatable. +type RecommendedMachineConfiguration struct { + VCPUs *ResourceRange `json:"vCPUs,omitempty"` + Memory *ResourceRange `json:"memory,omitempty"` +} + // RecoveryWalkResponse response after calling a manual recovery walk type RecoveryWalkResponse struct { autorest.Response `json:"-"` @@ -2782,6 +4477,26 @@ type RecoveryWalkResponse struct { NextPlatformUpdateDomain *int32 `json:"nextPlatformUpdateDomain,omitempty"` } +// RegionalReplicationStatus this is the regional replication status. +type RegionalReplicationStatus struct { + // Region - The region to which the gallery Image Version is being replicated to. + Region *string `json:"region,omitempty"` + // State - This is the regional replication state. Possible values include: 'ReplicationStateUnknown', 'ReplicationStateReplicating', 'ReplicationStateCompleted', 'ReplicationStateFailed' + State ReplicationState `json:"state,omitempty"` + // Details - The details of the replication status. + Details *string `json:"details,omitempty"` + // Progress - It indicates progress of the replication job. + Progress *int32 `json:"progress,omitempty"` +} + +// ReplicationStatus this is the replication status of the gallery Image Version. +type ReplicationStatus struct { + // AggregatedState - This is the aggregated replication status based on all the regional replication status flags. 
Possible values include: 'Unknown', 'InProgress', 'Completed', 'Failed' + AggregatedState AggregatedReplicationState `json:"aggregatedState,omitempty"` + // Summary - This is a summary of replication status for each region. + Summary *[]RegionalReplicationStatus `json:"summary,omitempty"` +} + // RequestRateByIntervalInput api request input for LogAnalytics getRequestRateByInterval Api. type RequestRateByIntervalInput struct { // IntervalLength - Interval value in minutes used to create LogAnalytics call rate logs. Possible values include: 'ThreeMins', 'FiveMins', 'ThirtyMins', 'SixtyMins' @@ -2794,7 +4509,7 @@ type RequestRateByIntervalInput struct { ToTime *date.Time `json:"toTime,omitempty"` // GroupByThrottlePolicy - Group query result by Throttle Policy applied. GroupByThrottlePolicy *bool `json:"groupByThrottlePolicy,omitempty"` - // GroupByOperationName - Group query result by by Operation Name. + // GroupByOperationName - Group query result by Operation Name. GroupByOperationName *bool `json:"groupByOperationName,omitempty"` // GroupByResourceName - Group query result by Resource Name. GroupByResourceName *bool `json:"groupByResourceName,omitempty"` @@ -2835,6 +4550,204 @@ func (r Resource) MarshalJSON() ([]byte, error) { return json.Marshal(objectMap) } +// ResourceRange describes the resource range. +type ResourceRange struct { + // Min - The minimum number of the resource. + Min *int32 `json:"min,omitempty"` + // Max - The maximum number of the resource. + Max *int32 `json:"max,omitempty"` +} + +// ResourceSku describes an available Compute SKU. +type ResourceSku struct { + // ResourceType - The type of resource the SKU applies to. + ResourceType *string `json:"resourceType,omitempty"` + // Name - The name of SKU. + Name *string `json:"name,omitempty"` + // Tier - Specifies the tier of virtual machines in a scale set.

    Possible Values:

    **Standard**

    **Basic** + Tier *string `json:"tier,omitempty"` + // Size - The Size of the SKU. + Size *string `json:"size,omitempty"` + // Family - The Family of this particular SKU. + Family *string `json:"family,omitempty"` + // Kind - The Kind of resources that are supported in this SKU. + Kind *string `json:"kind,omitempty"` + // Capacity - Specifies the number of virtual machines in the scale set. + Capacity *ResourceSkuCapacity `json:"capacity,omitempty"` + // Locations - The set of locations that the SKU is available. + Locations *[]string `json:"locations,omitempty"` + // LocationInfo - A list of locations and availability zones in those locations where the SKU is available. + LocationInfo *[]ResourceSkuLocationInfo `json:"locationInfo,omitempty"` + // APIVersions - The api versions that support this SKU. + APIVersions *[]string `json:"apiVersions,omitempty"` + // Costs - Metadata for retrieving price info. + Costs *[]ResourceSkuCosts `json:"costs,omitempty"` + // Capabilities - A name value pair to describe the capability. + Capabilities *[]ResourceSkuCapabilities `json:"capabilities,omitempty"` + // Restrictions - The restrictions because of which SKU cannot be used. This is empty if there are no restrictions. + Restrictions *[]ResourceSkuRestrictions `json:"restrictions,omitempty"` +} + +// ResourceSkuCapabilities describes The SKU capabilities object. +type ResourceSkuCapabilities struct { + // Name - An invariant to describe the feature. + Name *string `json:"name,omitempty"` + // Value - An invariant if the feature is measured by quantity. + Value *string `json:"value,omitempty"` +} + +// ResourceSkuCapacity describes scaling information of a SKU. +type ResourceSkuCapacity struct { + // Minimum - The minimum capacity. + Minimum *int64 `json:"minimum,omitempty"` + // Maximum - The maximum capacity that can be set. + Maximum *int64 `json:"maximum,omitempty"` + // Default - The default capacity. 
+ Default *int64 `json:"default,omitempty"` + // ScaleType - The scale type applicable to the sku. Possible values include: 'ResourceSkuCapacityScaleTypeAutomatic', 'ResourceSkuCapacityScaleTypeManual', 'ResourceSkuCapacityScaleTypeNone' + ScaleType ResourceSkuCapacityScaleType `json:"scaleType,omitempty"` +} + +// ResourceSkuCosts describes metadata for retrieving price info. +type ResourceSkuCosts struct { + // MeterID - Used for querying price from commerce. + MeterID *string `json:"meterID,omitempty"` + // Quantity - The multiplier is needed to extend the base metered cost. + Quantity *int64 `json:"quantity,omitempty"` + // ExtendedUnit - An invariant to show the extended unit. + ExtendedUnit *string `json:"extendedUnit,omitempty"` +} + +// ResourceSkuLocationInfo ... +type ResourceSkuLocationInfo struct { + // Location - Location of the SKU + Location *string `json:"location,omitempty"` + // Zones - List of availability zones where the SKU is supported. + Zones *[]string `json:"zones,omitempty"` +} + +// ResourceSkuRestrictionInfo ... +type ResourceSkuRestrictionInfo struct { + // Locations - Locations where the SKU is restricted + Locations *[]string `json:"locations,omitempty"` + // Zones - List of availability zones where the SKU is restricted. + Zones *[]string `json:"zones,omitempty"` +} + +// ResourceSkuRestrictions describes scaling information of a SKU. +type ResourceSkuRestrictions struct { + // Type - The type of restrictions. Possible values include: 'Location', 'Zone' + Type ResourceSkuRestrictionsType `json:"type,omitempty"` + // Values - The value of restrictions. If the restriction type is set to location. This would be different locations where the SKU is restricted. + Values *[]string `json:"values,omitempty"` + // RestrictionInfo - The information about the restriction where the SKU cannot be used. + RestrictionInfo *ResourceSkuRestrictionInfo `json:"restrictionInfo,omitempty"` + // ReasonCode - The reason for restriction. 
Possible values include: 'QuotaID', 'NotAvailableForSubscription' + ReasonCode ResourceSkuRestrictionsReasonCode `json:"reasonCode,omitempty"` +} + +// ResourceSkusResult the Compute List Skus operation response. +type ResourceSkusResult struct { + autorest.Response `json:"-"` + // Value - The list of skus available for the subscription. + Value *[]ResourceSku `json:"value,omitempty"` + // NextLink - The uri to fetch the next page of Compute Skus. Call ListNext() with this to fetch the next page of VMSS Skus. + NextLink *string `json:"nextLink,omitempty"` +} + +// ResourceSkusResultIterator provides access to a complete listing of ResourceSku values. +type ResourceSkusResultIterator struct { + i int + page ResourceSkusResultPage +} + +// Next advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +func (iter *ResourceSkusResultIterator) Next() error { + iter.i++ + if iter.i < len(iter.page.Values()) { + return nil + } + err := iter.page.Next() + if err != nil { + iter.i-- + return err + } + iter.i = 0 + return nil +} + +// NotDone returns true if the enumeration should be started or is not yet complete. +func (iter ResourceSkusResultIterator) NotDone() bool { + return iter.page.NotDone() && iter.i < len(iter.page.Values()) +} + +// Response returns the raw server response from the last page request. +func (iter ResourceSkusResultIterator) Response() ResourceSkusResult { + return iter.page.Response() +} + +// Value returns the current value or a zero-initialized value if the +// iterator has advanced beyond the end of the collection. +func (iter ResourceSkusResultIterator) Value() ResourceSku { + if !iter.page.NotDone() { + return ResourceSku{} + } + return iter.page.Values()[iter.i] +} + +// IsEmpty returns true if the ListResult contains no values. 
+func (rsr ResourceSkusResult) IsEmpty() bool { + return rsr.Value == nil || len(*rsr.Value) == 0 +} + +// resourceSkusResultPreparer prepares a request to retrieve the next set of results. +// It returns nil if no more results exist. +func (rsr ResourceSkusResult) resourceSkusResultPreparer() (*http.Request, error) { + if rsr.NextLink == nil || len(to.String(rsr.NextLink)) < 1 { + return nil, nil + } + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(rsr.NextLink))) +} + +// ResourceSkusResultPage contains a page of ResourceSku values. +type ResourceSkusResultPage struct { + fn func(ResourceSkusResult) (ResourceSkusResult, error) + rsr ResourceSkusResult +} + +// Next advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +func (page *ResourceSkusResultPage) Next() error { + next, err := page.fn(page.rsr) + if err != nil { + return err + } + page.rsr = next + return nil +} + +// NotDone returns true if the page enumeration should be started or is not yet complete. +func (page ResourceSkusResultPage) NotDone() bool { + return !page.rsr.IsEmpty() +} + +// Response returns the raw server response from the last page request. +func (page ResourceSkusResultPage) Response() ResourceSkusResult { + return page.rsr +} + +// Values returns the slice of values for the current page or nil if there are no values. +func (page ResourceSkusResultPage) Values() []ResourceSku { + if page.rsr.IsEmpty() { + return nil + } + return *page.rsr.Value +} + // RollbackStatusInfo information about rollback on failed VM instances after a OS Upgrade operation. type RollbackStatusInfo struct { // SuccessfullyRolledbackInstanceCount - The number of instances which have been successfully rolled back. 
@@ -2871,7 +4784,7 @@ type RollingUpgradeProgressInfo struct { // RollingUpgradeRunningStatus information about the current running state of the overall upgrade. type RollingUpgradeRunningStatus struct { - // Code - Code indicating the current status of the upgrade. Possible values include: 'RollingForward', 'Cancelled', 'Completed', 'Faulted' + // Code - Code indicating the current status of the upgrade. Possible values include: 'RollingUpgradeStatusCodeRollingForward', 'RollingUpgradeStatusCodeCancelled', 'RollingUpgradeStatusCodeCompleted', 'RollingUpgradeStatusCodeFaulted' Code RollingUpgradeStatusCode `json:"code,omitempty"` // StartTime - Start time of the upgrade. StartTime *date.Time `json:"startTime,omitempty"` @@ -3188,9 +5101,9 @@ type Sku struct { type Snapshot struct { autorest.Response `json:"-"` // ManagedBy - Unused. Always Null. - ManagedBy *string `json:"managedBy,omitempty"` - Sku *SnapshotSku `json:"sku,omitempty"` - *DiskProperties `json:"properties,omitempty"` + ManagedBy *string `json:"managedBy,omitempty"` + Sku *SnapshotSku `json:"sku,omitempty"` + *SnapshotProperties `json:"properties,omitempty"` // ID - Resource Id ID *string `json:"id,omitempty"` // Name - Resource name @@ -3212,8 +5125,8 @@ func (s Snapshot) MarshalJSON() ([]byte, error) { if s.Sku != nil { objectMap["sku"] = s.Sku } - if s.DiskProperties != nil { - objectMap["properties"] = s.DiskProperties + if s.SnapshotProperties != nil { + objectMap["properties"] = s.SnapshotProperties } if s.ID != nil { objectMap["id"] = s.ID @@ -3262,12 +5175,12 @@ func (s *Snapshot) UnmarshalJSON(body []byte) error { } case "properties": if v != nil { - var diskProperties DiskProperties - err = json.Unmarshal(*v, &diskProperties) + var snapshotProperties SnapshotProperties + err = json.Unmarshal(*v, &snapshotProperties) if err != nil { return err } - s.DiskProperties = &diskProperties + s.SnapshotProperties = &snapshotProperties } case "id": if v != nil { @@ -3422,6 +5335,24 @@ func (page 
SnapshotListPage) Values() []Snapshot { return *page.sl.Value } +// SnapshotProperties snapshot resource properties. +type SnapshotProperties struct { + // TimeCreated - The time when the disk was created. + TimeCreated *date.Time `json:"timeCreated,omitempty"` + // OsType - The Operating System type. Possible values include: 'Windows', 'Linux' + OsType OperatingSystemTypes `json:"osType,omitempty"` + // HyperVGeneration - The hypervisor generation of the Virtual Machine. Applicable to OS disks only. Possible values include: 'V1', 'V2' + HyperVGeneration HyperVGeneration `json:"hyperVGeneration,omitempty"` + // CreationData - Disk source information. CreationData information cannot be changed after the disk has been created. + CreationData *CreationData `json:"creationData,omitempty"` + // DiskSizeGB - If creationData.createOption is Empty, this field is mandatory and it indicates the size of the VHD to create. If this field is present for updates or creation with other options, it indicates a resize. Resizes are only allowed if the disk is not attached to a running VM, and can only increase the disk's size. + DiskSizeGB *int32 `json:"diskSizeGB,omitempty"` + // EncryptionSettingsCollection - Encryption settings collection used be Azure Disk Encryption, can contain multiple encryption settings per disk or snapshot. + EncryptionSettingsCollection *EncryptionSettingsCollection `json:"encryptionSettingsCollection,omitempty"` + // ProvisioningState - The disk provisioning state. + ProvisioningState *string `json:"provisioningState,omitempty"` +} + // SnapshotsCreateOrUpdateFuture an abstraction for monitoring and retrieving the results of a long-running // operation. type SnapshotsCreateOrUpdateFuture struct { @@ -3503,7 +5434,7 @@ func (future *SnapshotsGrantAccessFuture) Result(client SnapshotsClient) (au Acc // SnapshotSku the snapshots sku name. Can be Standard_LRS, Premium_LRS, or Standard_ZRS. type SnapshotSku struct { - // Name - The sku name. 
Possible values include: 'StandardLRS', 'PremiumLRS', 'StandardZRS' + // Name - The sku name. Possible values include: 'SnapshotStorageAccountTypesStandardLRS', 'SnapshotStorageAccountTypesPremiumLRS', 'SnapshotStorageAccountTypesStandardZRS' Name SnapshotStorageAccountTypes `json:"name,omitempty"` // Tier - The sku tier. Tier *string `json:"tier,omitempty"` @@ -3562,7 +5493,7 @@ func (future *SnapshotsUpdateFuture) Result(client SnapshotsClient) (s Snapshot, // SnapshotUpdate snapshot update resource. type SnapshotUpdate struct { - *DiskUpdateProperties `json:"properties,omitempty"` + *SnapshotUpdateProperties `json:"properties,omitempty"` // Tags - Resource tags Tags map[string]*string `json:"tags"` Sku *SnapshotSku `json:"sku,omitempty"` @@ -3571,8 +5502,8 @@ type SnapshotUpdate struct { // MarshalJSON is the custom marshaler for SnapshotUpdate. func (su SnapshotUpdate) MarshalJSON() ([]byte, error) { objectMap := make(map[string]interface{}) - if su.DiskUpdateProperties != nil { - objectMap["properties"] = su.DiskUpdateProperties + if su.SnapshotUpdateProperties != nil { + objectMap["properties"] = su.SnapshotUpdateProperties } if su.Tags != nil { objectMap["tags"] = su.Tags @@ -3594,12 +5525,12 @@ func (su *SnapshotUpdate) UnmarshalJSON(body []byte) error { switch k { case "properties": if v != nil { - var diskUpdateProperties DiskUpdateProperties - err = json.Unmarshal(*v, &diskUpdateProperties) + var snapshotUpdateProperties SnapshotUpdateProperties + err = json.Unmarshal(*v, &snapshotUpdateProperties) if err != nil { return err } - su.DiskUpdateProperties = &diskUpdateProperties + su.SnapshotUpdateProperties = &snapshotUpdateProperties } case "tags": if v != nil { @@ -3625,7 +5556,17 @@ func (su *SnapshotUpdate) UnmarshalJSON(body []byte) error { return nil } -// SourceVault the vault id is an Azure Resource Manager Resoure id in the form +// SnapshotUpdateProperties snapshot resource update properties. 
+type SnapshotUpdateProperties struct { + // OsType - the Operating System type. Possible values include: 'Windows', 'Linux' + OsType OperatingSystemTypes `json:"osType,omitempty"` + // DiskSizeGB - If creationData.createOption is Empty, this field is mandatory and it indicates the size of the VHD to create. If this field is present for updates or creation with other options, it indicates a resize. Resizes are only allowed if the disk is not attached to a running VM, and can only increase the disk's size. + DiskSizeGB *int32 `json:"diskSizeGB,omitempty"` + // EncryptionSettingsCollection - Encryption settings collection used be Azure Disk Encryption, can contain multiple encryption settings per disk or snapshot. + EncryptionSettingsCollection *EncryptionSettingsCollection `json:"encryptionSettingsCollection,omitempty"` +} + +// SourceVault the vault id is an Azure Resource Manager Resource id in the form // /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.KeyVault/vaults/{vaultName} type SourceVault struct { // ID - Resource Id @@ -3669,6 +5610,16 @@ type SubResourceReadOnly struct { ID *string `json:"id,omitempty"` } +// TargetRegion describes the target region information. +type TargetRegion struct { + // Name - The name of the region. + Name *string `json:"name,omitempty"` + // RegionalReplicaCount - The number of replicas of the Image Version to be created per region. This property is updatable. + RegionalReplicaCount *int32 `json:"regionalReplicaCount,omitempty"` + // StorageAccountType - Specifies the storage account type to be used to store the image. This property is not updatable. Possible values include: 'StorageAccountTypeStandardLRS', 'StorageAccountTypeStandardZRS' + StorageAccountType StorageAccountType `json:"storageAccountType,omitempty"` +} + // ThrottledRequestsInput api request input for LogAnalytics getThrottledRequests Api. 
type ThrottledRequestsInput struct { // BlobContainerSasURI - SAS Uri of the logging blob container to which LogAnalytics Api writes output logs to. @@ -3679,7 +5630,7 @@ type ThrottledRequestsInput struct { ToTime *date.Time `json:"toTime,omitempty"` // GroupByThrottlePolicy - Group query result by Throttle Policy applied. GroupByThrottlePolicy *bool `json:"groupByThrottlePolicy,omitempty"` - // GroupByOperationName - Group query result by by Operation Name. + // GroupByOperationName - Group query result by Operation Name. GroupByOperationName *bool `json:"groupByOperationName,omitempty"` // GroupByResourceName - Group query result by Resource Name. GroupByResourceName *bool `json:"groupByResourceName,omitempty"` @@ -3714,11 +5665,11 @@ type UpgradeOperationHistoricalStatusInfo struct { type UpgradeOperationHistoricalStatusInfoProperties struct { // RunningStatus - Information about the overall status of the upgrade operation. RunningStatus *UpgradeOperationHistoryStatus `json:"runningStatus,omitempty"` - // Progress - Counts of the VM's in each state. + // Progress - Counts of the VMs in each state. Progress *RollingUpgradeProgressInfo `json:"progress,omitempty"` // Error - Error Details for this upgrade if there are any. Error *APIError `json:"error,omitempty"` - // StartedBy - Invoker of the Upgrade Operation. Possible values include: 'Unknown', 'User', 'Platform' + // StartedBy - Invoker of the Upgrade Operation. Possible values include: 'UpgradeOperationInvokerUnknown', 'UpgradeOperationInvokerUser', 'UpgradeOperationInvokerPlatform' StartedBy UpgradeOperationInvoker `json:"startedBy,omitempty"` // TargetImageReference - Image Reference details TargetImageReference *ImageReference `json:"targetImageReference,omitempty"` @@ -3742,10 +5693,8 @@ type UpgradePolicy struct { Mode UpgradeMode `json:"mode,omitempty"` // RollingUpgradePolicy - The configuration parameters used while performing a rolling upgrade. 
RollingUpgradePolicy *RollingUpgradePolicy `json:"rollingUpgradePolicy,omitempty"` - // AutomaticOSUpgrade - Whether OS upgrades should automatically be applied to scale set instances in a rolling fashion when a newer version of the image becomes available. - AutomaticOSUpgrade *bool `json:"automaticOSUpgrade,omitempty"` - // AutoOSUpgradePolicy - Configuration parameters used for performing automatic OS Upgrade. - AutoOSUpgradePolicy *AutoOSUpgradePolicy `json:"autoOSUpgradePolicy,omitempty"` + // AutomaticOSUpgradePolicy - Configuration parameters used for performing automatic OS Upgrade. + AutomaticOSUpgradePolicy *AutomaticOSUpgradePolicy `json:"automaticOSUpgradePolicy,omitempty"` } // Usage describes Compute Resource Usage. @@ -3773,7 +5722,7 @@ type UsageName struct { type VaultCertificate struct { // CertificateURL - This is the URL of a certificate that has been uploaded to Key Vault as a secret. For adding a secret to the Key Vault, see [Add a key or secret to the key vault](https://docs.microsoft.com/azure/key-vault/key-vault-get-started/#add). In this case, your certificate needs to be It is the Base64 encoding of the following JSON Object which is encoded in UTF-8:

    {
    "data":"",
    "dataType":"pfx",
    "password":""
    } CertificateURL *string `json:"certificateUrl,omitempty"` - // CertificateStore - For Windows VMs, specifies the certificate store on the Virtual Machine to which the certificate should be added. The specified certificate store is implicitly in the LocalMachine account.

    For Linux VMs, the certificate file is placed under the /var/lib/waagent directory, with the file name .crt for the X509 certificate file and .prv for private key. Both of these files are .pem formatted. + // CertificateStore - For Windows VMs, specifies the certificate store on the Virtual Machine to which the certificate should be added. The specified certificate store is implicitly in the LocalMachine account.

    For Linux VMs, the certificate file is placed under the /var/lib/waagent directory, with the file name <UppercaseThumbprint>.crt for the X509 certificate file and <UppercaseThumbprint>.prv for private key. Both of these files are .pem formatted. CertificateStore *string `json:"certificateStore,omitempty"` } @@ -4441,8 +6390,34 @@ type VirtualMachineIdentity struct { TenantID *string `json:"tenantId,omitempty"` // Type - The type of identity used for the virtual machine. The type 'SystemAssigned, UserAssigned' includes both an implicitly created identity and a set of user assigned identities. The type 'None' will remove any identities from the virtual machine. Possible values include: 'ResourceIdentityTypeSystemAssigned', 'ResourceIdentityTypeUserAssigned', 'ResourceIdentityTypeSystemAssignedUserAssigned', 'ResourceIdentityTypeNone' Type ResourceIdentityType `json:"type,omitempty"` - // IdentityIds - The list of user identities associated with the Virtual Machine. The user identity references will be ARM resource ids in the form: '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedIdentity/identities/{identityName}'. - IdentityIds *[]string `json:"identityIds,omitempty"` + // UserAssignedIdentities - The list of user identities associated with the Virtual Machine. The user identity dictionary key references will be ARM resource ids in the form: '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedIdentity/userAssignedIdentities/{identityName}'. + UserAssignedIdentities map[string]*VirtualMachineIdentityUserAssignedIdentitiesValue `json:"userAssignedIdentities"` +} + +// MarshalJSON is the custom marshaler for VirtualMachineIdentity. 
+func (vmi VirtualMachineIdentity) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if vmi.PrincipalID != nil { + objectMap["principalId"] = vmi.PrincipalID + } + if vmi.TenantID != nil { + objectMap["tenantId"] = vmi.TenantID + } + if vmi.Type != "" { + objectMap["type"] = vmi.Type + } + if vmi.UserAssignedIdentities != nil { + objectMap["userAssignedIdentities"] = vmi.UserAssignedIdentities + } + return json.Marshal(objectMap) +} + +// VirtualMachineIdentityUserAssignedIdentitiesValue ... +type VirtualMachineIdentityUserAssignedIdentitiesValue struct { + // PrincipalID - The principal id of user assigned identity. + PrincipalID *string `json:"principalId,omitempty"` + // ClientID - The client id of user assigned identity. + ClientID *string `json:"clientId,omitempty"` } // VirtualMachineImage describes a Virtual Machine Image. @@ -4542,9 +6517,10 @@ func (vmi *VirtualMachineImage) UnmarshalJSON(body []byte) error { // VirtualMachineImageProperties describes the properties of a Virtual Machine Image. type VirtualMachineImageProperties struct { - Plan *PurchasePlan `json:"plan,omitempty"` - OsDiskImage *OSDiskImage `json:"osDiskImage,omitempty"` - DataDiskImages *[]DataDiskImage `json:"dataDiskImages,omitempty"` + Plan *PurchasePlan `json:"plan,omitempty"` + OsDiskImage *OSDiskImage `json:"osDiskImage,omitempty"` + DataDiskImages *[]DataDiskImage `json:"dataDiskImages,omitempty"` + AutomaticOSUpgradeProperties *AutomaticOSUpgradeProperties `json:"automaticOSUpgradeProperties,omitempty"` } // VirtualMachineImageResource virtual machine image resource information. @@ -4600,7 +6576,7 @@ type VirtualMachineInstanceView struct { Disks *[]DiskInstanceView `json:"disks,omitempty"` // Extensions - The extensions information. Extensions *[]VirtualMachineExtensionInstanceView `json:"extensions,omitempty"` - // BootDiagnostics - Boot Diagnostics is a debugging feature which allows you to view Console Output and Screenshot to diagnose VM status.

    For Linux Virtual Machines, you can easily view the output of your console log.

    For both Windows and Linux virtual machines, Azure also enables you to see a screenshot of the VM from the hypervisor. + // BootDiagnostics - Boot Diagnostics is a debugging feature which allows you to view Console Output and Screenshot to diagnose VM status.

    You can easily view the output of your console log.

    Azure also enables you to see a screenshot of the VM from the hypervisor. BootDiagnostics *BootDiagnosticsInstanceView `json:"bootDiagnostics,omitempty"` // Statuses - The resource status information. Statuses *[]InstanceViewStatus `json:"statuses,omitempty"` @@ -4714,13 +6690,15 @@ type VirtualMachineProperties struct { HardwareProfile *HardwareProfile `json:"hardwareProfile,omitempty"` // StorageProfile - Specifies the storage settings for the virtual machine disks. StorageProfile *StorageProfile `json:"storageProfile,omitempty"` + // AdditionalCapabilities - Specifies additional capabilities enabled or disabled on the virtual machine. + AdditionalCapabilities *AdditionalCapabilities `json:"additionalCapabilities,omitempty"` // OsProfile - Specifies the operating system settings for the virtual machine. OsProfile *OSProfile `json:"osProfile,omitempty"` // NetworkProfile - Specifies the network interfaces of the virtual machine. NetworkProfile *NetworkProfile `json:"networkProfile,omitempty"` // DiagnosticsProfile - Specifies the boot diagnostic settings state.

    Minimum api-version: 2015-06-15. DiagnosticsProfile *DiagnosticsProfile `json:"diagnosticsProfile,omitempty"` - // AvailabilitySet - Specifies information about the availability set that the virtual machine should be assigned to. Virtual machines specified in the same availability set are allocated to different nodes to maximize availability. For more information about availability sets, see [Manage the availability of virtual machines](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-windows-manage-availability?toc=%2fazure%2fvirtual-machines%2fwindows%2ftoc.json).

    For more information on Azure planned maintainance, see [Planned maintenance for virtual machines in Azure](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-windows-planned-maintenance?toc=%2fazure%2fvirtual-machines%2fwindows%2ftoc.json)

    Currently, a VM can only be added to availability set at creation time. An existing VM cannot be added to an availability set. + // AvailabilitySet - Specifies information about the availability set that the virtual machine should be assigned to. Virtual machines specified in the same availability set are allocated to different nodes to maximize availability. For more information about availability sets, see [Manage the availability of virtual machines](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-windows-manage-availability?toc=%2fazure%2fvirtual-machines%2fwindows%2ftoc.json).

    For more information on Azure planned maintenance, see [Planned maintenance for virtual machines in Azure](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-windows-planned-maintenance?toc=%2fazure%2fvirtual-machines%2fwindows%2ftoc.json)

    Currently, a VM can only be added to availability set at creation time. An existing VM cannot be added to an availability set. AvailabilitySet *SubResource `json:"availabilitySet,omitempty"` // ProvisioningState - The provisioning state, which only appears in the response. ProvisioningState *string `json:"provisioningState,omitempty"` @@ -4732,6 +6710,13 @@ type VirtualMachineProperties struct { VMID *string `json:"vmId,omitempty"` } +// VirtualMachineReimageParameters parameters for Reimaging Virtual Machine. NOTE: Virtual Machine OS disk will +// always be reimaged +type VirtualMachineReimageParameters struct { + // TempDisk - Specifies whether to reimage temp disk. Default value: false. + TempDisk *bool `json:"tempDisk,omitempty"` +} + // VirtualMachineScaleSet describes a Virtual Machine Scale Set. type VirtualMachineScaleSet struct { autorest.Response `json:"-"` @@ -5109,6 +7094,8 @@ type VirtualMachineScaleSetExtensionProperties struct { ProtectedSettings interface{} `json:"protectedSettings,omitempty"` // ProvisioningState - The provisioning state, which only appears in the response. ProvisioningState *string `json:"provisioningState,omitempty"` + // ProvisionAfterExtensions - Collection of extension names after which this extension needs to be provisioned. + ProvisionAfterExtensions *[]string `json:"provisionAfterExtensions,omitempty"` } // VirtualMachineScaleSetExtensionsCreateOrUpdateFuture an abstraction for monitoring and retrieving the results of @@ -5171,8 +7158,34 @@ type VirtualMachineScaleSetIdentity struct { TenantID *string `json:"tenantId,omitempty"` // Type - The type of identity used for the virtual machine scale set. The type 'SystemAssigned, UserAssigned' includes both an implicitly created identity and a set of user assigned identities. The type 'None' will remove any identities from the virtual machine scale set. 
Possible values include: 'ResourceIdentityTypeSystemAssigned', 'ResourceIdentityTypeUserAssigned', 'ResourceIdentityTypeSystemAssignedUserAssigned', 'ResourceIdentityTypeNone' Type ResourceIdentityType `json:"type,omitempty"` - // IdentityIds - The list of user identities associated with the virtual machine scale set. The user identity references will be ARM resource ids in the form: '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedIdentity/identities/{identityName}'. - IdentityIds *[]string `json:"identityIds,omitempty"` + // UserAssignedIdentities - The list of user identities associated with the virtual machine scale set. The user identity dictionary key references will be ARM resource ids in the form: '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedIdentity/userAssignedIdentities/{identityName}'. + UserAssignedIdentities map[string]*VirtualMachineScaleSetIdentityUserAssignedIdentitiesValue `json:"userAssignedIdentities"` +} + +// MarshalJSON is the custom marshaler for VirtualMachineScaleSetIdentity. +func (vmssi VirtualMachineScaleSetIdentity) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if vmssi.PrincipalID != nil { + objectMap["principalId"] = vmssi.PrincipalID + } + if vmssi.TenantID != nil { + objectMap["tenantId"] = vmssi.TenantID + } + if vmssi.Type != "" { + objectMap["type"] = vmssi.Type + } + if vmssi.UserAssignedIdentities != nil { + objectMap["userAssignedIdentities"] = vmssi.UserAssignedIdentities + } + return json.Marshal(objectMap) +} + +// VirtualMachineScaleSetIdentityUserAssignedIdentitiesValue ... +type VirtualMachineScaleSetIdentityUserAssignedIdentitiesValue struct { + // PrincipalID - The principal id of user assigned identity. + PrincipalID *string `json:"principalId,omitempty"` + // ClientID - The client id of user assigned identity. 
+ ClientID *string `json:"clientId,omitempty"` } // VirtualMachineScaleSetInstanceView the instance view of a virtual machine scale set. @@ -5272,6 +7285,8 @@ type VirtualMachineScaleSetIPConfigurationProperties struct { PrivateIPAddressVersion IPVersion `json:"privateIPAddressVersion,omitempty"` // ApplicationGatewayBackendAddressPools - Specifies an array of references to backend address pools of application gateways. A scale set can reference backend address pools of multiple application gateways. Multiple scale sets cannot use the same application gateway. ApplicationGatewayBackendAddressPools *[]SubResource `json:"applicationGatewayBackendAddressPools,omitempty"` + // ApplicationSecurityGroups - Specifies an array of references to application security group. + ApplicationSecurityGroups *[]SubResource `json:"applicationSecurityGroups,omitempty"` // LoadBalancerBackendAddressPools - Specifies an array of references to backend address pools of load balancers. A scale set can reference backend address pools of one public and one internal load balancer. Multiple scale sets cannot use the same load balancer. LoadBalancerBackendAddressPools *[]SubResource `json:"loadBalancerBackendAddressPools,omitempty"` // LoadBalancerInboundNatPools - Specifies an array of references to inbound Nat pools of the load balancers. A scale set can reference inbound nat pools of one public and one internal load balancer. Multiple scale sets cannot use the same load balancer @@ -5700,7 +7715,7 @@ func (page VirtualMachineScaleSetListWithLinkResultPage) Values() []VirtualMachi // VirtualMachineScaleSetManagedDiskParameters describes the parameters of a ScaleSet managed disk. type VirtualMachineScaleSetManagedDiskParameters struct { - // StorageAccountType - Specifies the storage account type for the managed disk. Possible values are: Standard_LRS, Premium_LRS, and StandardSSD_LRS. 
Possible values include: 'StorageAccountTypesStandardLRS', 'StorageAccountTypesPremiumLRS', 'StorageAccountTypesStandardSSDLRS' + // StorageAccountType - Specifies the storage account type for the managed disk. NOTE: UltraSSD_LRS can only be used with data disks, it cannot be used with OS Disk. Possible values include: 'StorageAccountTypesStandardLRS', 'StorageAccountTypesPremiumLRS', 'StorageAccountTypesStandardSSDLRS', 'StorageAccountTypesUltraSSDLRS' StorageAccountType StorageAccountTypes `json:"storageAccountType,omitempty"` } @@ -5813,6 +7828,8 @@ type VirtualMachineScaleSetOSDisk struct { WriteAcceleratorEnabled *bool `json:"writeAcceleratorEnabled,omitempty"` // CreateOption - Specifies how the virtual machines in the scale set should be created.

    The only allowed value is: **FromImage** \u2013 This value is used when you are using an image to create the virtual machine. If you are using a platform image, you also use the imageReference element described above. If you are using a marketplace image, you also use the plan element previously described. Possible values include: 'DiskCreateOptionTypesFromImage', 'DiskCreateOptionTypesEmpty', 'DiskCreateOptionTypesAttach' CreateOption DiskCreateOptionTypes `json:"createOption,omitempty"` + // DiffDiskSettings - Specifies the ephemeral disk Settings for the operating system disk used by the virtual machine scale set. + DiffDiskSettings *DiffDiskSettings `json:"diffDiskSettings,omitempty"` // DiskSizeGB - Specifies the size of the operating system disk in gigabytes. This element can be used to overwrite the size of the disk in a virtual machine image.

    This value cannot be larger than 1023 GB DiskSizeGB *int32 `json:"diskSizeGB,omitempty"` // OsType - This property allows you to specify the type of the OS that is included in the disk if creating a VM from user-image or a specialized VHD.

    Possible values are:

    **Windows**

    **Linux**. Possible values include: 'Windows', 'Linux' @@ -5853,11 +7870,13 @@ type VirtualMachineScaleSetProperties struct { ProvisioningState *string `json:"provisioningState,omitempty"` // Overprovision - Specifies whether the Virtual Machine Scale Set should be overprovisioned. Overprovision *bool `json:"overprovision,omitempty"` + // DoNotRunExtensionsOnOverprovisionedVMs - When Overprovision is enabled, extensions are launched only on the requested number of VMs which are finally kept. This property will hence ensure that the extensions do not run on the extra overprovisioned VMs. + DoNotRunExtensionsOnOverprovisionedVMs *bool `json:"doNotRunExtensionsOnOverprovisionedVMs,omitempty"` // UniqueID - Specifies the ID which uniquely identifies a Virtual Machine Scale Set. UniqueID *string `json:"uniqueId,omitempty"` // SinglePlacementGroup - When true this limits the scale set to a single placement group, of max size 100 virtual machines. SinglePlacementGroup *bool `json:"singlePlacementGroup,omitempty"` - // ZoneBalance - Whether to force stictly even Virtual Machine distribution cross x-zones in case there is zone outage. + // ZoneBalance - Whether to force strictly even Virtual Machine distribution cross x-zones in case there is zone outage. ZoneBalance *bool `json:"zoneBalance,omitempty"` // PlatformFaultDomainCount - Fault Domain count for each placement group. PlatformFaultDomainCount *int32 `json:"platformFaultDomainCount,omitempty"` @@ -5932,6 +7951,16 @@ type VirtualMachineScaleSetPublicIPAddressConfigurationProperties struct { DNSSettings *VirtualMachineScaleSetPublicIPAddressConfigurationDNSSettings `json:"dnsSettings,omitempty"` // IPTags - The list of IP tags associated with the public IP address. IPTags *[]VirtualMachineScaleSetIPTag `json:"ipTags,omitempty"` + // PublicIPPrefix - The PublicIPPrefix from which to allocate publicIP addresses. 
+ PublicIPPrefix *SubResource `json:"publicIPPrefix,omitempty"` +} + +// VirtualMachineScaleSetReimageParameters describes a Virtual Machine Scale Set VM Reimage Parameters. +type VirtualMachineScaleSetReimageParameters struct { + // InstanceIds - The virtual machine scale set instance ids. Omitting the virtual machine scale set instance ids will result in the operation being performed on all virtual machines in the virtual machine scale set. + InstanceIds *[]string `json:"instanceIds,omitempty"` + // TempDisk - Specifies whether to reimage temp disk. Default value: false. + TempDisk *bool `json:"tempDisk,omitempty"` } // VirtualMachineScaleSetRollingUpgradesCancelFuture an abstraction for monitoring and retrieving the results of a @@ -5957,6 +7986,29 @@ func (future *VirtualMachineScaleSetRollingUpgradesCancelFuture) Result(client V return } +// VirtualMachineScaleSetRollingUpgradesStartExtensionUpgradeFuture an abstraction for monitoring and retrieving +// the results of a long-running operation. +type VirtualMachineScaleSetRollingUpgradesStartExtensionUpgradeFuture struct { + azure.Future +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. +func (future *VirtualMachineScaleSetRollingUpgradesStartExtensionUpgradeFuture) Result(client VirtualMachineScaleSetRollingUpgradesClient) (ar autorest.Response, err error) { + var done bool + done, err = future.Done(client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetRollingUpgradesStartExtensionUpgradeFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.VirtualMachineScaleSetRollingUpgradesStartExtensionUpgradeFuture") + return + } + ar.Response = future.Response() + return +} + // VirtualMachineScaleSetRollingUpgradesStartOSUpgradeFuture an abstraction for monitoring and retrieving the // results of a long-running operation. 
type VirtualMachineScaleSetRollingUpgradesStartOSUpgradeFuture struct { @@ -6497,6 +8549,8 @@ type VirtualMachineScaleSetUpdateIPConfigurationProperties struct { PrivateIPAddressVersion IPVersion `json:"privateIPAddressVersion,omitempty"` // ApplicationGatewayBackendAddressPools - The application gateway backend address pools. ApplicationGatewayBackendAddressPools *[]SubResource `json:"applicationGatewayBackendAddressPools,omitempty"` + // ApplicationSecurityGroups - Specifies an array of references to application security group. + ApplicationSecurityGroups *[]SubResource `json:"applicationSecurityGroups,omitempty"` // LoadBalancerBackendAddressPools - The load balancer backend address pools. LoadBalancerBackendAddressPools *[]SubResource `json:"loadBalancerBackendAddressPools,omitempty"` // LoadBalancerInboundNatPools - The load balancer inbound nat pools. @@ -6942,7 +8996,7 @@ type VirtualMachineScaleSetVMInstanceView struct { Extensions *[]VirtualMachineExtensionInstanceView `json:"extensions,omitempty"` // VMHealth - The health status for the VM. VMHealth *VirtualMachineHealthStatus `json:"vmHealth,omitempty"` - // BootDiagnostics - Boot Diagnostics is a debugging feature which allows you to view Console Output and Screenshot to diagnose VM status.

    For Linux Virtual Machines, you can easily view the output of your console log.

    For both Windows and Linux virtual machines, Azure also enables you to see a screenshot of the VM from the hypervisor. + // BootDiagnostics - Boot Diagnostics is a debugging feature which allows you to view Console Output and Screenshot to diagnose VM status.

    You can easily view the output of your console log.

    Azure also enables you to see a screenshot of the VM from the hypervisor. BootDiagnostics *BootDiagnosticsInstanceView `json:"bootDiagnostics,omitempty"` // Statuses - The resource status information. Statuses *[]InstanceViewStatus `json:"statuses,omitempty"` @@ -7053,12 +9107,20 @@ func (page VirtualMachineScaleSetVMListResultPage) Values() []VirtualMachineScal return *page.vmssvlr.Value } +// VirtualMachineScaleSetVMNetworkProfileConfiguration describes a virtual machine scale set VM network profile. +type VirtualMachineScaleSetVMNetworkProfileConfiguration struct { + // NetworkInterfaceConfigurations - The list of network configurations. + NetworkInterfaceConfigurations *[]VirtualMachineScaleSetNetworkConfiguration `json:"networkInterfaceConfigurations,omitempty"` +} + // VirtualMachineScaleSetVMProfile describes a virtual machine scale set virtual machine profile. type VirtualMachineScaleSetVMProfile struct { // OsProfile - Specifies the operating system settings for the virtual machines in the scale set. OsProfile *VirtualMachineScaleSetOSProfile `json:"osProfile,omitempty"` // StorageProfile - Specifies the storage settings for the virtual machine disks. StorageProfile *VirtualMachineScaleSetStorageProfile `json:"storageProfile,omitempty"` + // AdditionalCapabilities - Specifies additional capabilities enabled or disabled on the virtual machine in the scale set. For instance: whether the virtual machine has the capability to support attaching managed data disks with UltraSSD_LRS storage account type. + AdditionalCapabilities *AdditionalCapabilities `json:"additionalCapabilities,omitempty"` // NetworkProfile - Specifies properties of the network interfaces of the virtual machines in the scale set. NetworkProfile *VirtualMachineScaleSetNetworkProfile `json:"networkProfile,omitempty"` // DiagnosticsProfile - Specifies the boot diagnostic settings state.

    Minimum api-version: 2015-06-15. @@ -7085,18 +9147,40 @@ type VirtualMachineScaleSetVMProperties struct { HardwareProfile *HardwareProfile `json:"hardwareProfile,omitempty"` // StorageProfile - Specifies the storage settings for the virtual machine disks. StorageProfile *StorageProfile `json:"storageProfile,omitempty"` + // AdditionalCapabilities - Specifies additional capabilities enabled or disabled on the virtual machine in the scale set. For instance: whether the virtual machine has the capability to support attaching managed data disks with UltraSSD_LRS storage account type. + AdditionalCapabilities *AdditionalCapabilities `json:"additionalCapabilities,omitempty"` // OsProfile - Specifies the operating system settings for the virtual machine. OsProfile *OSProfile `json:"osProfile,omitempty"` // NetworkProfile - Specifies the network interfaces of the virtual machine. NetworkProfile *NetworkProfile `json:"networkProfile,omitempty"` + // NetworkProfileConfiguration - Specifies the network profile configuration of the virtual machine. + NetworkProfileConfiguration *VirtualMachineScaleSetVMNetworkProfileConfiguration `json:"networkProfileConfiguration,omitempty"` // DiagnosticsProfile - Specifies the boot diagnostic settings state.

    Minimum api-version: 2015-06-15. DiagnosticsProfile *DiagnosticsProfile `json:"diagnosticsProfile,omitempty"` - // AvailabilitySet - Specifies information about the availability set that the virtual machine should be assigned to. Virtual machines specified in the same availability set are allocated to different nodes to maximize availability. For more information about availability sets, see [Manage the availability of virtual machines](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-windows-manage-availability?toc=%2fazure%2fvirtual-machines%2fwindows%2ftoc.json).

    For more information on Azure planned maintainance, see [Planned maintenance for virtual machines in Azure](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-windows-planned-maintenance?toc=%2fazure%2fvirtual-machines%2fwindows%2ftoc.json)

    Currently, a VM can only be added to availability set at creation time. An existing VM cannot be added to an availability set. + // AvailabilitySet - Specifies information about the availability set that the virtual machine should be assigned to. Virtual machines specified in the same availability set are allocated to different nodes to maximize availability. For more information about availability sets, see [Manage the availability of virtual machines](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-windows-manage-availability?toc=%2fazure%2fvirtual-machines%2fwindows%2ftoc.json).

    For more information on Azure planned maintenance, see [Planned maintenance for virtual machines in Azure](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-windows-planned-maintenance?toc=%2fazure%2fvirtual-machines%2fwindows%2ftoc.json)

    Currently, a VM can only be added to availability set at creation time. An existing VM cannot be added to an availability set. AvailabilitySet *SubResource `json:"availabilitySet,omitempty"` // ProvisioningState - The provisioning state, which only appears in the response. ProvisioningState *string `json:"provisioningState,omitempty"` // LicenseType - Specifies that the image or disk that is being used was licensed on-premises. This element is only used for images that contain the Windows Server operating system.

    Possible values are:

    Windows_Client

    Windows_Server

    If this element is included in a request for an update, the value must match the initial value. This value cannot be updated.

    For more information, see [Azure Hybrid Use Benefit for Windows Server](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-windows-hybrid-use-benefit-licensing?toc=%2fazure%2fvirtual-machines%2fwindows%2ftoc.json)

    Minimum api-version: 2015-06-15 LicenseType *string `json:"licenseType,omitempty"` + // ModelDefinitionApplied - Specifies whether the model applied to the virtual machine is the model of the virtual machine scale set or the customized model for the virtual machine. + ModelDefinitionApplied *string `json:"modelDefinitionApplied,omitempty"` + // ProtectionPolicy - Specifies the protection policy of the virtual machine. + ProtectionPolicy *VirtualMachineScaleSetVMProtectionPolicy `json:"protectionPolicy,omitempty"` +} + +// VirtualMachineScaleSetVMProtectionPolicy the protection policy of a virtual machine scale set VM. +type VirtualMachineScaleSetVMProtectionPolicy struct { + // ProtectFromScaleIn - Indicates that the virtual machine scale set VM shouldn't be considered for deletion during a scale-in operation. + ProtectFromScaleIn *bool `json:"protectFromScaleIn,omitempty"` + // ProtectFromScaleSetActions - Indicates that model updates or actions (including scale-in) initiated on the virtual machine scale set should not be applied to the virtual machine scale set VM. + ProtectFromScaleSetActions *bool `json:"protectFromScaleSetActions,omitempty"` +} + +// VirtualMachineScaleSetVMReimageParameters describes a Virtual Machine Scale Set VM Reimage Parameters. +type VirtualMachineScaleSetVMReimageParameters struct { + // TempDisk - Specifies whether to reimage temp disk. Default value: false. + TempDisk *bool `json:"tempDisk,omitempty"` } // VirtualMachineScaleSetVMsDeallocateFuture an abstraction for monitoring and retrieving the results of a @@ -7583,6 +9667,29 @@ func (future *VirtualMachinesRedeployFuture) Result(client VirtualMachinesClient return } +// VirtualMachinesReimageFuture an abstraction for monitoring and retrieving the results of a long-running +// operation. +type VirtualMachinesReimageFuture struct { + azure.Future +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. 
+func (future *VirtualMachinesReimageFuture) Result(client VirtualMachinesClient) (ar autorest.Response, err error) { + var done bool + done, err = future.Done(client) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachinesReimageFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("compute.VirtualMachinesReimageFuture") + return + } + ar.Response = future.Response() + return +} + // VirtualMachinesRestartFuture an abstraction for monitoring and retrieving the results of a long-running // operation. type VirtualMachinesRestartFuture struct { @@ -7789,6 +9896,12 @@ func (vmu *VirtualMachineUpdate) UnmarshalJSON(body []byte) error { return nil } +// VMScaleSetConvertToSinglePlacementGroupInput ... +type VMScaleSetConvertToSinglePlacementGroupInput struct { + // ActivePlacementGroupID - Id of the placement group in which you want future virtual machine instances to be placed. To query placement group Id, please use Virtual Machine Scale Set VMs - Get API. If not provided, the platform will choose one with maximum number of virtual machine instances. + ActivePlacementGroupID *string `json:"activePlacementGroupId,omitempty"` +} + // WindowsConfiguration specifies Windows operating system settings on the virtual machine. type WindowsConfiguration struct { // ProvisionVMAgent - Indicates whether virtual machine agent should be provisioned on the virtual machine.

    When this property is not specified in the request body, default behavior is to set it to true. This will ensure that VM Agent is installed on the VM so that extensions can be added to the VM later. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-04-01/compute/operations.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-03-01/compute/operations.go similarity index 99% rename from vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-04-01/compute/operations.go rename to vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-03-01/compute/operations.go index 14c892fe10acd..648375b4f352d 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-04-01/compute/operations.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-03-01/compute/operations.go @@ -64,7 +64,7 @@ func (client OperationsClient) List(ctx context.Context) (result OperationListRe // ListPreparer prepares the List request. func (client OperationsClient) ListPreparer(ctx context.Context) (*http.Request, error) { - const APIVersion = "2018-04-01" + const APIVersion = "2019-03-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-03-01/compute/resourceskus.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-03-01/compute/resourceskus.go new file mode 100644 index 0000000000000..de32a3df699dd --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-03-01/compute/resourceskus.go @@ -0,0 +1,130 @@ +package compute + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "context" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "net/http" +) + +// ResourceSkusClient is the compute Client +type ResourceSkusClient struct { + BaseClient +} + +// NewResourceSkusClient creates an instance of the ResourceSkusClient client. +func NewResourceSkusClient(subscriptionID string) ResourceSkusClient { + return NewResourceSkusClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewResourceSkusClientWithBaseURI creates an instance of the ResourceSkusClient client. +func NewResourceSkusClientWithBaseURI(baseURI string, subscriptionID string) ResourceSkusClient { + return ResourceSkusClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// List gets the list of Microsoft.Compute SKUs available for your Subscription. 
+func (client ResourceSkusClient) List(ctx context.Context) (result ResourceSkusResultPage, err error) { + result.fn = client.listNextResults + req, err := client.ListPreparer(ctx) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.ResourceSkusClient", "List", nil, "Failure preparing request") + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.rsr.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "compute.ResourceSkusClient", "List", resp, "Failure sending request") + return + } + + result.rsr, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.ResourceSkusClient", "List", resp, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. +func (client ResourceSkusClient) ListPreparer(ctx context.Context) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2017-09-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Compute/skus", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client ResourceSkusClient) ListSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. 
+func (client ResourceSkusClient) ListResponder(resp *http.Response) (result ResourceSkusResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// listNextResults retrieves the next set of results, if any. +func (client ResourceSkusClient) listNextResults(lastResults ResourceSkusResult) (result ResourceSkusResult, err error) { + req, err := lastResults.resourceSkusResultPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "compute.ResourceSkusClient", "listNextResults", nil, "Failure preparing next results request") + } + if req == nil { + return + } + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "compute.ResourceSkusClient", "listNextResults", resp, "Failure sending next results request") + } + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.ResourceSkusClient", "listNextResults", resp, "Failure responding to next results request") + } + return +} + +// ListComplete enumerates all values, automatically crossing page boundaries as required. 
+func (client ResourceSkusClient) ListComplete(ctx context.Context) (result ResourceSkusResultIterator, err error) { + result.page, err = client.List(ctx) + return +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-04-01/compute/snapshots.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-03-01/compute/snapshots.go similarity index 93% rename from vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-04-01/compute/snapshots.go rename to vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-03-01/compute/snapshots.go index b87359354942e..c01e7dea745b5 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-04-01/compute/snapshots.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-03-01/compute/snapshots.go @@ -49,21 +49,13 @@ func NewSnapshotsClientWithBaseURI(baseURI string, subscriptionID string) Snapsh func (client SnapshotsClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, snapshotName string, snapshot Snapshot) (result SnapshotsCreateOrUpdateFuture, err error) { if err := validation.Validate([]validation.Validation{ {TargetValue: snapshot, - Constraints: []validation.Constraint{{Target: "snapshot.DiskProperties", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "snapshot.DiskProperties.CreationData", Name: validation.Null, Rule: true, - Chain: []validation.Constraint{{Target: "snapshot.DiskProperties.CreationData.ImageReference", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "snapshot.DiskProperties.CreationData.ImageReference.ID", Name: validation.Null, Rule: true, Chain: nil}}}, + Constraints: []validation.Constraint{{Target: "snapshot.SnapshotProperties", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "snapshot.SnapshotProperties.CreationData", Name: validation.Null, Rule: true, + Chain: 
[]validation.Constraint{{Target: "snapshot.SnapshotProperties.CreationData.ImageReference", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "snapshot.SnapshotProperties.CreationData.ImageReference.ID", Name: validation.Null, Rule: true, Chain: nil}}}, }}, - {Target: "snapshot.DiskProperties.EncryptionSettings", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "snapshot.DiskProperties.EncryptionSettings.DiskEncryptionKey", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "snapshot.DiskProperties.EncryptionSettings.DiskEncryptionKey.SourceVault", Name: validation.Null, Rule: true, Chain: nil}, - {Target: "snapshot.DiskProperties.EncryptionSettings.DiskEncryptionKey.SecretURL", Name: validation.Null, Rule: true, Chain: nil}, - }}, - {Target: "snapshot.DiskProperties.EncryptionSettings.KeyEncryptionKey", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "snapshot.DiskProperties.EncryptionSettings.KeyEncryptionKey.SourceVault", Name: validation.Null, Rule: true, Chain: nil}, - {Target: "snapshot.DiskProperties.EncryptionSettings.KeyEncryptionKey.KeyURL", Name: validation.Null, Rule: true, Chain: nil}, - }}, - }}, + {Target: "snapshot.SnapshotProperties.EncryptionSettingsCollection", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "snapshot.SnapshotProperties.EncryptionSettingsCollection.Enabled", Name: validation.Null, Rule: true, Chain: nil}}}, }}}}}); err != nil { return result, validation.NewError("compute.SnapshotsClient", "CreateOrUpdate", err.Error()) } @@ -91,7 +83,7 @@ func (client SnapshotsClient) CreateOrUpdatePreparer(ctx context.Context, resour "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-04-01" + const APIVersion = "2018-09-30" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -165,7 +157,7 @@ func (client SnapshotsClient) 
DeletePreparer(ctx context.Context, resourceGroupN "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-04-01" + const APIVersion = "2018-09-30" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -242,7 +234,7 @@ func (client SnapshotsClient) GetPreparer(ctx context.Context, resourceGroupName "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-04-01" + const APIVersion = "2018-09-30" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -311,7 +303,7 @@ func (client SnapshotsClient) GrantAccessPreparer(ctx context.Context, resourceG "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-04-01" + const APIVersion = "2018-09-30" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -386,7 +378,7 @@ func (client SnapshotsClient) ListPreparer(ctx context.Context) (*http.Request, "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-04-01" + const APIVersion = "2018-09-30" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -479,7 +471,7 @@ func (client SnapshotsClient) ListByResourceGroupPreparer(ctx context.Context, r "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-04-01" + const APIVersion = "2018-09-30" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -568,7 +560,7 @@ func (client SnapshotsClient) RevokeAccessPreparer(ctx context.Context, resource "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-04-01" + const APIVersion = "2018-09-30" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -640,7 +632,7 @@ func (client SnapshotsClient) UpdatePreparer(ctx context.Context, resourceGroupN "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = 
"2018-04-01" + const APIVersion = "2018-09-30" queryParameters := map[string]interface{}{ "api-version": APIVersion, } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-04-01/compute/usage.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-03-01/compute/usage.go similarity index 99% rename from vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-04-01/compute/usage.go rename to vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-03-01/compute/usage.go index fd007d01ea342..a6ca95a1f1f59 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-04-01/compute/usage.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-03-01/compute/usage.go @@ -80,7 +80,7 @@ func (client UsageClient) ListPreparer(ctx context.Context, location string) (*h "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-04-01" + const APIVersion = "2019-03-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-04-01/compute/version.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-03-01/compute/version.go similarity index 94% rename from vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-04-01/compute/version.go rename to vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-03-01/compute/version.go index 87d03ecf94907..184b918cc5427 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-04-01/compute/version.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-03-01/compute/version.go @@ -21,7 +21,7 @@ import "github.com/Azure/azure-sdk-for-go/version" // UserAgent returns the UserAgent string to use when sending http.Requests. 
func UserAgent() string { - return "Azure-SDK-For-Go/" + version.Number + " compute/2018-04-01" + return "Azure-SDK-For-Go/" + version.Number + " compute/2019-03-01" } // Version returns the semantic version (see http://semver.org) of the client. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-04-01/compute/virtualmachineextensionimages.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-03-01/compute/virtualmachineextensionimages.go similarity index 99% rename from vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-04-01/compute/virtualmachineextensionimages.go rename to vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-03-01/compute/virtualmachineextensionimages.go index 45714d55e79cb..134b887dda0db 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-04-01/compute/virtualmachineextensionimages.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-03-01/compute/virtualmachineextensionimages.go @@ -75,7 +75,7 @@ func (client VirtualMachineExtensionImagesClient) GetPreparer(ctx context.Contex "version": autorest.Encode("path", version), } - const APIVersion = "2018-04-01" + const APIVersion = "2019-03-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -141,7 +141,7 @@ func (client VirtualMachineExtensionImagesClient) ListTypesPreparer(ctx context. 
"subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-04-01" + const APIVersion = "2019-03-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -209,7 +209,7 @@ func (client VirtualMachineExtensionImagesClient) ListVersionsPreparer(ctx conte "type": autorest.Encode("path", typeParameter), } - const APIVersion = "2018-04-01" + const APIVersion = "2019-03-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-04-01/compute/virtualmachineextensions.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-03-01/compute/virtualmachineextensions.go similarity index 99% rename from vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-04-01/compute/virtualmachineextensions.go rename to vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-03-01/compute/virtualmachineextensions.go index 06673a73d63ed..c78423650e4df 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-04-01/compute/virtualmachineextensions.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-03-01/compute/virtualmachineextensions.go @@ -70,7 +70,7 @@ func (client VirtualMachineExtensionsClient) CreateOrUpdatePreparer(ctx context. 
"vmName": autorest.Encode("path", VMName), } - const APIVersion = "2018-04-01" + const APIVersion = "2019-03-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -145,7 +145,7 @@ func (client VirtualMachineExtensionsClient) DeletePreparer(ctx context.Context, "vmName": autorest.Encode("path", VMName), } - const APIVersion = "2018-04-01" + const APIVersion = "2019-03-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -224,7 +224,7 @@ func (client VirtualMachineExtensionsClient) GetPreparer(ctx context.Context, re "vmName": autorest.Encode("path", VMName), } - const APIVersion = "2018-04-01" + const APIVersion = "2019-03-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -295,7 +295,7 @@ func (client VirtualMachineExtensionsClient) ListPreparer(ctx context.Context, r "vmName": autorest.Encode("path", VMName), } - const APIVersion = "2018-04-01" + const APIVersion = "2019-03-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -362,7 +362,7 @@ func (client VirtualMachineExtensionsClient) UpdatePreparer(ctx context.Context, "vmName": autorest.Encode("path", VMName), } - const APIVersion = "2018-04-01" + const APIVersion = "2019-03-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-04-01/compute/virtualmachineimages.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-03-01/compute/virtualmachineimages.go similarity index 99% rename from vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-04-01/compute/virtualmachineimages.go rename to vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-03-01/compute/virtualmachineimages.go index aeaa39b049185..f3e2967a5470d 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-04-01/compute/virtualmachineimages.go +++ 
b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-03-01/compute/virtualmachineimages.go @@ -79,7 +79,7 @@ func (client VirtualMachineImagesClient) GetPreparer(ctx context.Context, locati "version": autorest.Encode("path", version), } - const APIVersion = "2018-04-01" + const APIVersion = "2019-03-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -151,7 +151,7 @@ func (client VirtualMachineImagesClient) ListPreparer(ctx context.Context, locat "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-04-01" + const APIVersion = "2019-03-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -227,7 +227,7 @@ func (client VirtualMachineImagesClient) ListOffersPreparer(ctx context.Context, "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-04-01" + const APIVersion = "2019-03-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -292,7 +292,7 @@ func (client VirtualMachineImagesClient) ListPublishersPreparer(ctx context.Cont "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-04-01" + const APIVersion = "2019-03-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -361,7 +361,7 @@ func (client VirtualMachineImagesClient) ListSkusPreparer(ctx context.Context, l "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-04-01" + const APIVersion = "2019-03-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-04-01/compute/virtualmachineruncommands.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-03-01/compute/virtualmachineruncommands.go similarity index 99% rename from 
vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-04-01/compute/virtualmachineruncommands.go rename to vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-03-01/compute/virtualmachineruncommands.go index 3777e8b867037..9eb28a0f5c5b6 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-04-01/compute/virtualmachineruncommands.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-03-01/compute/virtualmachineruncommands.go @@ -80,7 +80,7 @@ func (client VirtualMachineRunCommandsClient) GetPreparer(ctx context.Context, l "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-04-01" + const APIVersion = "2019-03-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -152,7 +152,7 @@ func (client VirtualMachineRunCommandsClient) ListPreparer(ctx context.Context, "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-04-01" + const APIVersion = "2019-03-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-04-01/compute/virtualmachines.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-03-01/compute/virtualmachines.go similarity index 86% rename from vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-04-01/compute/virtualmachines.go rename to vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-03-01/compute/virtualmachines.go index 40a6166a1f346..787520cbdf352 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-04-01/compute/virtualmachines.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-03-01/compute/virtualmachines.go @@ -78,7 +78,7 @@ func (client VirtualMachinesClient) CapturePreparer(ctx context.Context, resourc "vmName": autorest.Encode("path", VMName), } - const 
APIVersion = "2018-04-01" + const APIVersion = "2019-03-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -152,7 +152,7 @@ func (client VirtualMachinesClient) ConvertToManagedDisksPreparer(ctx context.Co "vmName": autorest.Encode("path", VMName), } - const APIVersion = "2018-04-01" + const APIVersion = "2019-03-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -244,7 +244,7 @@ func (client VirtualMachinesClient) CreateOrUpdatePreparer(ctx context.Context, "vmName": autorest.Encode("path", VMName), } - const APIVersion = "2018-04-01" + const APIVersion = "2019-03-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -318,7 +318,7 @@ func (client VirtualMachinesClient) DeallocatePreparer(ctx context.Context, reso "vmName": autorest.Encode("path", VMName), } - const APIVersion = "2018-04-01" + const APIVersion = "2019-03-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -388,7 +388,7 @@ func (client VirtualMachinesClient) DeletePreparer(ctx context.Context, resource "vmName": autorest.Encode("path", VMName), } - const APIVersion = "2018-04-01" + const APIVersion = "2019-03-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -464,7 +464,7 @@ func (client VirtualMachinesClient) GeneralizePreparer(ctx context.Context, reso "vmName": autorest.Encode("path", VMName), } - const APIVersion = "2018-04-01" + const APIVersion = "2019-03-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -531,7 +531,7 @@ func (client VirtualMachinesClient) GetPreparer(ctx context.Context, resourceGro "vmName": autorest.Encode("path", VMName), } - const APIVersion = "2018-04-01" + const APIVersion = "2019-03-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -601,7 +601,7 @@ func (client VirtualMachinesClient) InstanceViewPreparer(ctx context.Context, re "vmName": autorest.Encode("path", VMName), } - 
const APIVersion = "2018-04-01" + const APIVersion = "2019-03-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -668,7 +668,7 @@ func (client VirtualMachinesClient) ListPreparer(ctx context.Context, resourceGr "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-04-01" + const APIVersion = "2019-03-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -759,7 +759,7 @@ func (client VirtualMachinesClient) ListAllPreparer(ctx context.Context) (*http. "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-04-01" + const APIVersion = "2019-03-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -853,7 +853,7 @@ func (client VirtualMachinesClient) ListAvailableSizesPreparer(ctx context.Conte "vmName": autorest.Encode("path", VMName), } - const APIVersion = "2018-04-01" + const APIVersion = "2019-03-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -886,6 +886,105 @@ func (client VirtualMachinesClient) ListAvailableSizesResponder(resp *http.Respo return } +// ListByLocation gets all the virtual machines under the specified subscription for the specified location. +// Parameters: +// location - the location for which virtual machines under the subscription are queried. 
+func (client VirtualMachinesClient) ListByLocation(ctx context.Context, location string) (result VirtualMachineListResultPage, err error) { + if err := validation.Validate([]validation.Validation{ + {TargetValue: location, + Constraints: []validation.Constraint{{Target: "location", Name: validation.Pattern, Rule: `^[-\w\._]+$`, Chain: nil}}}}); err != nil { + return result, validation.NewError("compute.VirtualMachinesClient", "ListByLocation", err.Error()) + } + + result.fn = client.listByLocationNextResults + req, err := client.ListByLocationPreparer(ctx, location) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "ListByLocation", nil, "Failure preparing request") + return + } + + resp, err := client.ListByLocationSender(req) + if err != nil { + result.vmlr.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "ListByLocation", resp, "Failure sending request") + return + } + + result.vmlr, err = client.ListByLocationResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "ListByLocation", resp, "Failure responding to request") + } + + return +} + +// ListByLocationPreparer prepares the ListByLocation request. 
+func (client VirtualMachinesClient) ListByLocationPreparer(ctx context.Context, location string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "location": autorest.Encode("path", location), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2019-03-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/virtualMachines", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListByLocationSender sends the ListByLocation request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualMachinesClient) ListByLocationSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) +} + +// ListByLocationResponder handles the response to the ListByLocation request. The method always +// closes the http.Response Body. +func (client VirtualMachinesClient) ListByLocationResponder(resp *http.Response) (result VirtualMachineListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// listByLocationNextResults retrieves the next set of results, if any. 
+func (client VirtualMachinesClient) listByLocationNextResults(lastResults VirtualMachineListResult) (result VirtualMachineListResult, err error) { + req, err := lastResults.virtualMachineListResultPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "listByLocationNextResults", nil, "Failure preparing next results request") + } + if req == nil { + return + } + resp, err := client.ListByLocationSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "listByLocationNextResults", resp, "Failure sending next results request") + } + result, err = client.ListByLocationResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "listByLocationNextResults", resp, "Failure responding to next results request") + } + return +} + +// ListByLocationComplete enumerates all values, automatically crossing page boundaries as required. +func (client VirtualMachinesClient) ListByLocationComplete(ctx context.Context, location string) (result VirtualMachineListResultIterator, err error) { + result.page, err = client.ListByLocation(ctx, location) + return +} + // PerformMaintenance the operation to perform maintenance on a virtual machine. // Parameters: // resourceGroupName - the name of the resource group. @@ -914,7 +1013,7 @@ func (client VirtualMachinesClient) PerformMaintenancePreparer(ctx context.Conte "vmName": autorest.Encode("path", VMName), } - const APIVersion = "2018-04-01" + const APIVersion = "2019-03-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -961,8 +1060,11 @@ func (client VirtualMachinesClient) PerformMaintenanceResponder(resp *http.Respo // Parameters: // resourceGroupName - the name of the resource group. // VMName - the name of the virtual machine. 
-func (client VirtualMachinesClient) PowerOff(ctx context.Context, resourceGroupName string, VMName string) (result VirtualMachinesPowerOffFuture, err error) { - req, err := client.PowerOffPreparer(ctx, resourceGroupName, VMName) +// skipShutdown - the parameter to request non-graceful VM shutdown. True value for this flag indicates +// non-graceful shutdown whereas false indicates otherwise. Default value for this flag is false if not +// specified +func (client VirtualMachinesClient) PowerOff(ctx context.Context, resourceGroupName string, VMName string, skipShutdown *bool) (result VirtualMachinesPowerOffFuture, err error) { + req, err := client.PowerOffPreparer(ctx, resourceGroupName, VMName, skipShutdown) if err != nil { err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "PowerOff", nil, "Failure preparing request") return @@ -978,17 +1080,22 @@ func (client VirtualMachinesClient) PowerOff(ctx context.Context, resourceGroupN } // PowerOffPreparer prepares the PowerOff request. 
-func (client VirtualMachinesClient) PowerOffPreparer(ctx context.Context, resourceGroupName string, VMName string) (*http.Request, error) { +func (client VirtualMachinesClient) PowerOffPreparer(ctx context.Context, resourceGroupName string, VMName string, skipShutdown *bool) (*http.Request, error) { pathParameters := map[string]interface{}{ "resourceGroupName": autorest.Encode("path", resourceGroupName), "subscriptionId": autorest.Encode("path", client.SubscriptionID), "vmName": autorest.Encode("path", VMName), } - const APIVersion = "2018-04-01" + const APIVersion = "2019-03-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } + if skipShutdown != nil { + queryParameters["skipShutdown"] = autorest.Encode("query", *skipShutdown) + } else { + queryParameters["skipShutdown"] = autorest.Encode("query", false) + } preparer := autorest.CreatePreparer( autorest.AsPost(), @@ -1055,7 +1162,7 @@ func (client VirtualMachinesClient) RedeployPreparer(ctx context.Context, resour "vmName": autorest.Encode("path", VMName), } - const APIVersion = "2018-04-01" + const APIVersion = "2019-03-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -1097,6 +1204,82 @@ func (client VirtualMachinesClient) RedeployResponder(resp *http.Response) (resu return } +// Reimage reimages the virtual machine which has an ephemeral OS disk back to its initial state. +// Parameters: +// resourceGroupName - the name of the resource group. +// VMName - the name of the virtual machine. +// parameters - parameters supplied to the Reimage Virtual Machine operation. 
+func (client VirtualMachinesClient) Reimage(ctx context.Context, resourceGroupName string, VMName string, parameters *VirtualMachineReimageParameters) (result VirtualMachinesReimageFuture, err error) { + req, err := client.ReimagePreparer(ctx, resourceGroupName, VMName, parameters) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "Reimage", nil, "Failure preparing request") + return + } + + result, err = client.ReimageSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "Reimage", result.Response(), "Failure sending request") + return + } + + return +} + +// ReimagePreparer prepares the Reimage request. +func (client VirtualMachinesClient) ReimagePreparer(ctx context.Context, resourceGroupName string, VMName string, parameters *VirtualMachineReimageParameters) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "vmName": autorest.Encode("path", VMName), + } + + const APIVersion = "2019-03-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/reimage", pathParameters), + autorest.WithQueryParameters(queryParameters)) + if parameters != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithJSON(parameters)) + } + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ReimageSender sends the Reimage request. The method will close the +// http.Response Body if it receives an error. 
+func (client VirtualMachinesClient) ReimageSender(req *http.Request) (future VirtualMachinesReimageFuture, err error) { + var resp *http.Response + resp, err = autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) + if err != nil { + return + } + err = autorest.Respond(resp, azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted)) + if err != nil { + return + } + future.Future, err = azure.NewFutureFromResponse(resp) + return +} + +// ReimageResponder handles the response to the Reimage request. The method always +// closes the http.Response Body. +func (client VirtualMachinesClient) ReimageResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByClosing()) + result.Response = resp + return +} + // Restart the operation to restart a virtual machine. // Parameters: // resourceGroupName - the name of the resource group. 
@@ -1125,7 +1308,7 @@ func (client VirtualMachinesClient) RestartPreparer(ctx context.Context, resourc "vmName": autorest.Encode("path", VMName), } - const APIVersion = "2018-04-01" + const APIVersion = "2019-03-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -1202,7 +1385,7 @@ func (client VirtualMachinesClient) RunCommandPreparer(ctx context.Context, reso "vmName": autorest.Encode("path", VMName), } - const APIVersion = "2018-04-01" + const APIVersion = "2019-03-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -1275,7 +1458,7 @@ func (client VirtualMachinesClient) StartPreparer(ctx context.Context, resourceG "vmName": autorest.Encode("path", VMName), } - const APIVersion = "2018-04-01" + const APIVersion = "2019-03-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -1346,7 +1529,7 @@ func (client VirtualMachinesClient) UpdatePreparer(ctx context.Context, resource "vmName": autorest.Encode("path", VMName), } - const APIVersion = "2018-04-01" + const APIVersion = "2019-03-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-04-01/compute/virtualmachinescalesetextensions.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-03-01/compute/virtualmachinescalesetextensions.go similarity index 99% rename from vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-04-01/compute/virtualmachinescalesetextensions.go rename to vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-03-01/compute/virtualmachinescalesetextensions.go index c2f1187f8596d..ab33dc8a7a6c2 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-04-01/compute/virtualmachinescalesetextensions.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-03-01/compute/virtualmachinescalesetextensions.go @@ -71,7 +71,7 @@ func 
(client VirtualMachineScaleSetExtensionsClient) CreateOrUpdatePreparer(ctx "vmssExtensionName": autorest.Encode("path", vmssExtensionName), } - const APIVersion = "2018-04-01" + const APIVersion = "2019-03-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -146,7 +146,7 @@ func (client VirtualMachineScaleSetExtensionsClient) DeletePreparer(ctx context. "vmssExtensionName": autorest.Encode("path", vmssExtensionName), } - const APIVersion = "2018-04-01" + const APIVersion = "2019-03-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -225,7 +225,7 @@ func (client VirtualMachineScaleSetExtensionsClient) GetPreparer(ctx context.Con "vmssExtensionName": autorest.Encode("path", vmssExtensionName), } - const APIVersion = "2018-04-01" + const APIVersion = "2019-03-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -296,7 +296,7 @@ func (client VirtualMachineScaleSetExtensionsClient) ListPreparer(ctx context.Co "vmScaleSetName": autorest.Encode("path", VMScaleSetName), } - const APIVersion = "2018-04-01" + const APIVersion = "2019-03-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-04-01/compute/virtualmachinescalesetrollingupgrades.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-03-01/compute/virtualmachinescalesetrollingupgrades.go similarity index 75% rename from vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-04-01/compute/virtualmachinescalesetrollingupgrades.go rename to vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-03-01/compute/virtualmachinescalesetrollingupgrades.go index 381cb0cb63276..2426277a57f81 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-04-01/compute/virtualmachinescalesetrollingupgrades.go +++ 
b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-03-01/compute/virtualmachinescalesetrollingupgrades.go @@ -69,7 +69,7 @@ func (client VirtualMachineScaleSetRollingUpgradesClient) CancelPreparer(ctx con "vmScaleSetName": autorest.Encode("path", VMScaleSetName), } - const APIVersion = "2018-04-01" + const APIVersion = "2019-03-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -145,7 +145,7 @@ func (client VirtualMachineScaleSetRollingUpgradesClient) GetLatestPreparer(ctx "vmScaleSetName": autorest.Encode("path", VMScaleSetName), } - const APIVersion = "2018-04-01" + const APIVersion = "2019-03-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -178,6 +178,78 @@ func (client VirtualMachineScaleSetRollingUpgradesClient) GetLatestResponder(res return } +// StartExtensionUpgrade starts a rolling upgrade to move all extensions for all virtual machine scale set instances to +// the latest available extension version. Instances which are already running the latest extension versions are not +// affected. +// Parameters: +// resourceGroupName - the name of the resource group. +// VMScaleSetName - the name of the VM scale set. 
+func (client VirtualMachineScaleSetRollingUpgradesClient) StartExtensionUpgrade(ctx context.Context, resourceGroupName string, VMScaleSetName string) (result VirtualMachineScaleSetRollingUpgradesStartExtensionUpgradeFuture, err error) { + req, err := client.StartExtensionUpgradePreparer(ctx, resourceGroupName, VMScaleSetName) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetRollingUpgradesClient", "StartExtensionUpgrade", nil, "Failure preparing request") + return + } + + result, err = client.StartExtensionUpgradeSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetRollingUpgradesClient", "StartExtensionUpgrade", result.Response(), "Failure sending request") + return + } + + return +} + +// StartExtensionUpgradePreparer prepares the StartExtensionUpgrade request. +func (client VirtualMachineScaleSetRollingUpgradesClient) StartExtensionUpgradePreparer(ctx context.Context, resourceGroupName string, VMScaleSetName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "vmScaleSetName": autorest.Encode("path", VMScaleSetName), + } + + const APIVersion = "2019-03-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/extensionRollingUpgrade", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// StartExtensionUpgradeSender sends the StartExtensionUpgrade request. The method will close the +// http.Response Body if it receives an error. 
+func (client VirtualMachineScaleSetRollingUpgradesClient) StartExtensionUpgradeSender(req *http.Request) (future VirtualMachineScaleSetRollingUpgradesStartExtensionUpgradeFuture, err error) { + var resp *http.Response + resp, err = autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) + if err != nil { + return + } + err = autorest.Respond(resp, azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted)) + if err != nil { + return + } + future.Future, err = azure.NewFutureFromResponse(resp) + return +} + +// StartExtensionUpgradeResponder handles the response to the StartExtensionUpgrade request. The method always +// closes the http.Response Body. +func (client VirtualMachineScaleSetRollingUpgradesClient) StartExtensionUpgradeResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByClosing()) + result.Response = resp + return +} + // StartOSUpgrade starts a rolling upgrade to move all virtual machine scale set instances to the latest available // Platform Image OS version. Instances which are already running the latest available OS version are not affected. 
// Parameters: @@ -207,7 +279,7 @@ func (client VirtualMachineScaleSetRollingUpgradesClient) StartOSUpgradePreparer "vmScaleSetName": autorest.Encode("path", VMScaleSetName), } - const APIVersion = "2018-04-01" + const APIVersion = "2019-03-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-04-01/compute/virtualmachinescalesets.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-03-01/compute/virtualmachinescalesets.go similarity index 93% rename from vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-04-01/compute/virtualmachinescalesets.go rename to vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-03-01/compute/virtualmachinescalesets.go index ef73c14177561..bb1cadd5ae732 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-04-01/compute/virtualmachinescalesets.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-03-01/compute/virtualmachinescalesets.go @@ -40,6 +40,70 @@ func NewVirtualMachineScaleSetsClientWithBaseURI(baseURI string, subscriptionID return VirtualMachineScaleSetsClient{NewWithBaseURI(baseURI, subscriptionID)} } +// ConvertToSinglePlacementGroup converts SinglePlacementGroup property to false for a existing virtual machine scale +// set. +// Parameters: +// resourceGroupName - the name of the resource group. +// VMScaleSetName - the name of the virtual machine scale set to create or update. +// parameters - the input object for ConvertToSinglePlacementGroup API. 
+func (client VirtualMachineScaleSetsClient) ConvertToSinglePlacementGroup(ctx context.Context, resourceGroupName string, VMScaleSetName string, parameters VMScaleSetConvertToSinglePlacementGroupInput) (result autorest.Response, err error) { + req, err := client.ConvertToSinglePlacementGroupPreparer(ctx, resourceGroupName, VMScaleSetName, parameters) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "ConvertToSinglePlacementGroup", nil, "Failure preparing request") + return + } + + resp, err := client.ConvertToSinglePlacementGroupSender(req) + if err != nil { + result.Response = resp + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "ConvertToSinglePlacementGroup", resp, "Failure sending request") + return + } + + result, err = client.ConvertToSinglePlacementGroupResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "ConvertToSinglePlacementGroup", resp, "Failure responding to request") + } + + return +} + +// ConvertToSinglePlacementGroupPreparer prepares the ConvertToSinglePlacementGroup request. 
+func (client VirtualMachineScaleSetsClient) ConvertToSinglePlacementGroupPreparer(ctx context.Context, resourceGroupName string, VMScaleSetName string, parameters VMScaleSetConvertToSinglePlacementGroupInput) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "vmScaleSetName": autorest.Encode("path", VMScaleSetName), + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/convertToSinglePlacementGroup", pathParameters), + autorest.WithJSON(parameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ConvertToSinglePlacementGroupSender sends the ConvertToSinglePlacementGroup request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualMachineScaleSetsClient) ConvertToSinglePlacementGroupSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) +} + +// ConvertToSinglePlacementGroupResponder handles the response to the ConvertToSinglePlacementGroup request. The method always +// closes the http.Response Body. +func (client VirtualMachineScaleSetsClient) ConvertToSinglePlacementGroupResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByClosing()) + result.Response = resp + return +} + // CreateOrUpdate create or update a VM scale set. // Parameters: // resourceGroupName - the name of the resource group. 
@@ -92,7 +156,7 @@ func (client VirtualMachineScaleSetsClient) CreateOrUpdatePreparer(ctx context.C "vmScaleSetName": autorest.Encode("path", VMScaleSetName), } - const APIVersion = "2018-04-01" + const APIVersion = "2019-03-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -167,7 +231,7 @@ func (client VirtualMachineScaleSetsClient) DeallocatePreparer(ctx context.Conte "vmScaleSetName": autorest.Encode("path", VMScaleSetName), } - const APIVersion = "2018-04-01" + const APIVersion = "2019-03-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -242,7 +306,7 @@ func (client VirtualMachineScaleSetsClient) DeletePreparer(ctx context.Context, "vmScaleSetName": autorest.Encode("path", VMScaleSetName), } - const APIVersion = "2018-04-01" + const APIVersion = "2019-03-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -319,7 +383,7 @@ func (client VirtualMachineScaleSetsClient) DeleteInstancesPreparer(ctx context. "vmScaleSetName": autorest.Encode("path", VMScaleSetName), } - const APIVersion = "2018-04-01" + const APIVersion = "2019-03-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -399,7 +463,7 @@ func (client VirtualMachineScaleSetsClient) ForceRecoveryServiceFabricPlatformUp "vmScaleSetName": autorest.Encode("path", VMScaleSetName), } - const APIVersion = "2018-04-01" + const APIVersion = "2019-03-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, "platformUpdateDomain": autorest.Encode("query", platformUpdateDomain), @@ -467,7 +531,7 @@ func (client VirtualMachineScaleSetsClient) GetPreparer(ctx context.Context, res "vmScaleSetName": autorest.Encode("path", VMScaleSetName), } - const APIVersion = "2018-04-01" + const APIVersion = "2019-03-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -534,7 +598,7 @@ func (client VirtualMachineScaleSetsClient) GetInstanceViewPreparer(ctx context. 
"vmScaleSetName": autorest.Encode("path", VMScaleSetName), } - const APIVersion = "2018-04-01" + const APIVersion = "2019-03-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -602,7 +666,7 @@ func (client VirtualMachineScaleSetsClient) GetOSUpgradeHistoryPreparer(ctx cont "vmScaleSetName": autorest.Encode("path", VMScaleSetName), } - const APIVersion = "2018-04-01" + const APIVersion = "2019-03-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -695,7 +759,7 @@ func (client VirtualMachineScaleSetsClient) ListPreparer(ctx context.Context, re "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-04-01" + const APIVersion = "2019-03-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -787,7 +851,7 @@ func (client VirtualMachineScaleSetsClient) ListAllPreparer(ctx context.Context) "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-04-01" + const APIVersion = "2019-03-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -883,7 +947,7 @@ func (client VirtualMachineScaleSetsClient) ListSkusPreparer(ctx context.Context "vmScaleSetName": autorest.Encode("path", VMScaleSetName), } - const APIVersion = "2018-04-01" + const APIVersion = "2019-03-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -974,7 +1038,7 @@ func (client VirtualMachineScaleSetsClient) PerformMaintenancePreparer(ctx conte "vmScaleSetName": autorest.Encode("path", VMScaleSetName), } - const APIVersion = "2018-04-01" + const APIVersion = "2019-03-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -1027,8 +1091,11 @@ func (client VirtualMachineScaleSetsClient) PerformMaintenanceResponder(resp *ht // resourceGroupName - the name of the resource group. // VMScaleSetName - the name of the VM scale set. 
// VMInstanceIDs - a list of virtual machine instance IDs from the VM scale set. -func (client VirtualMachineScaleSetsClient) PowerOff(ctx context.Context, resourceGroupName string, VMScaleSetName string, VMInstanceIDs *VirtualMachineScaleSetVMInstanceIDs) (result VirtualMachineScaleSetsPowerOffFuture, err error) { - req, err := client.PowerOffPreparer(ctx, resourceGroupName, VMScaleSetName, VMInstanceIDs) +// skipShutdown - the parameter to request non-graceful VM shutdown. True value for this flag indicates +// non-graceful shutdown whereas false indicates otherwise. Default value for this flag is false if not +// specified +func (client VirtualMachineScaleSetsClient) PowerOff(ctx context.Context, resourceGroupName string, VMScaleSetName string, VMInstanceIDs *VirtualMachineScaleSetVMInstanceIDs, skipShutdown *bool) (result VirtualMachineScaleSetsPowerOffFuture, err error) { + req, err := client.PowerOffPreparer(ctx, resourceGroupName, VMScaleSetName, VMInstanceIDs, skipShutdown) if err != nil { err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "PowerOff", nil, "Failure preparing request") return @@ -1044,17 +1111,22 @@ func (client VirtualMachineScaleSetsClient) PowerOff(ctx context.Context, resour } // PowerOffPreparer prepares the PowerOff request. 
-func (client VirtualMachineScaleSetsClient) PowerOffPreparer(ctx context.Context, resourceGroupName string, VMScaleSetName string, VMInstanceIDs *VirtualMachineScaleSetVMInstanceIDs) (*http.Request, error) { +func (client VirtualMachineScaleSetsClient) PowerOffPreparer(ctx context.Context, resourceGroupName string, VMScaleSetName string, VMInstanceIDs *VirtualMachineScaleSetVMInstanceIDs, skipShutdown *bool) (*http.Request, error) { pathParameters := map[string]interface{}{ "resourceGroupName": autorest.Encode("path", resourceGroupName), "subscriptionId": autorest.Encode("path", client.SubscriptionID), "vmScaleSetName": autorest.Encode("path", VMScaleSetName), } - const APIVersion = "2018-04-01" + const APIVersion = "2019-03-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } + if skipShutdown != nil { + queryParameters["skipShutdown"] = autorest.Encode("query", *skipShutdown) + } else { + queryParameters["skipShutdown"] = autorest.Encode("query", false) + } preparer := autorest.CreatePreparer( autorest.AsContentType("application/json; charset=utf-8"), @@ -1127,7 +1199,7 @@ func (client VirtualMachineScaleSetsClient) RedeployPreparer(ctx context.Context "vmScaleSetName": autorest.Encode("path", VMScaleSetName), } - const APIVersion = "2018-04-01" + const APIVersion = "2019-03-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -1174,13 +1246,14 @@ func (client VirtualMachineScaleSetsClient) RedeployResponder(resp *http.Respons return } -// Reimage reimages (upgrade the operating system) one or more virtual machines in a VM scale set. +// Reimage reimages (upgrade the operating system) one or more virtual machines in a VM scale set which don't have a +// ephemeral OS disk, for virtual machines who have a ephemeral OS disk the virtual machine is reset to initial state. // Parameters: // resourceGroupName - the name of the resource group. // VMScaleSetName - the name of the VM scale set. 
-// VMInstanceIDs - a list of virtual machine instance IDs from the VM scale set. -func (client VirtualMachineScaleSetsClient) Reimage(ctx context.Context, resourceGroupName string, VMScaleSetName string, VMInstanceIDs *VirtualMachineScaleSetVMInstanceIDs) (result VirtualMachineScaleSetsReimageFuture, err error) { - req, err := client.ReimagePreparer(ctx, resourceGroupName, VMScaleSetName, VMInstanceIDs) +// VMScaleSetReimageInput - parameters for Reimaging VM ScaleSet. +func (client VirtualMachineScaleSetsClient) Reimage(ctx context.Context, resourceGroupName string, VMScaleSetName string, VMScaleSetReimageInput *VirtualMachineScaleSetReimageParameters) (result VirtualMachineScaleSetsReimageFuture, err error) { + req, err := client.ReimagePreparer(ctx, resourceGroupName, VMScaleSetName, VMScaleSetReimageInput) if err != nil { err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "Reimage", nil, "Failure preparing request") return @@ -1196,14 +1269,14 @@ func (client VirtualMachineScaleSetsClient) Reimage(ctx context.Context, resourc } // ReimagePreparer prepares the Reimage request. 
-func (client VirtualMachineScaleSetsClient) ReimagePreparer(ctx context.Context, resourceGroupName string, VMScaleSetName string, VMInstanceIDs *VirtualMachineScaleSetVMInstanceIDs) (*http.Request, error) { +func (client VirtualMachineScaleSetsClient) ReimagePreparer(ctx context.Context, resourceGroupName string, VMScaleSetName string, VMScaleSetReimageInput *VirtualMachineScaleSetReimageParameters) (*http.Request, error) { pathParameters := map[string]interface{}{ "resourceGroupName": autorest.Encode("path", resourceGroupName), "subscriptionId": autorest.Encode("path", client.SubscriptionID), "vmScaleSetName": autorest.Encode("path", VMScaleSetName), } - const APIVersion = "2018-04-01" + const APIVersion = "2019-03-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -1214,9 +1287,9 @@ func (client VirtualMachineScaleSetsClient) ReimagePreparer(ctx context.Context, autorest.WithBaseURL(client.BaseURI), autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/reimage", pathParameters), autorest.WithQueryParameters(queryParameters)) - if VMInstanceIDs != nil { + if VMScaleSetReimageInput != nil { preparer = autorest.DecoratePreparer(preparer, - autorest.WithJSON(VMInstanceIDs)) + autorest.WithJSON(VMScaleSetReimageInput)) } return preparer.Prepare((&http.Request{}).WithContext(ctx)) } @@ -1280,7 +1353,7 @@ func (client VirtualMachineScaleSetsClient) ReimageAllPreparer(ctx context.Conte "vmScaleSetName": autorest.Encode("path", VMScaleSetName), } - const APIVersion = "2018-04-01" + const APIVersion = "2019-03-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -1356,7 +1429,7 @@ func (client VirtualMachineScaleSetsClient) RestartPreparer(ctx context.Context, "vmScaleSetName": autorest.Encode("path", VMScaleSetName), } - const APIVersion = "2018-04-01" + const APIVersion = "2019-03-01" queryParameters := 
map[string]interface{}{ "api-version": APIVersion, } @@ -1432,7 +1505,7 @@ func (client VirtualMachineScaleSetsClient) StartPreparer(ctx context.Context, r "vmScaleSetName": autorest.Encode("path", VMScaleSetName), } - const APIVersion = "2018-04-01" + const APIVersion = "2019-03-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -1508,7 +1581,7 @@ func (client VirtualMachineScaleSetsClient) UpdatePreparer(ctx context.Context, "vmScaleSetName": autorest.Encode("path", VMScaleSetName), } - const APIVersion = "2018-04-01" + const APIVersion = "2019-03-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -1588,7 +1661,7 @@ func (client VirtualMachineScaleSetsClient) UpdateInstancesPreparer(ctx context. "vmScaleSetName": autorest.Encode("path", VMScaleSetName), } - const APIVersion = "2018-04-01" + const APIVersion = "2019-03-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-04-01/compute/virtualmachinescalesetvms.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-03-01/compute/virtualmachinescalesetvms.go similarity index 96% rename from vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-04-01/compute/virtualmachinescalesetvms.go rename to vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-03-01/compute/virtualmachinescalesetvms.go index a068ec453f0e6..9fe1d1bdc95fe 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-04-01/compute/virtualmachinescalesetvms.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-03-01/compute/virtualmachinescalesetvms.go @@ -72,7 +72,7 @@ func (client VirtualMachineScaleSetVMsClient) DeallocatePreparer(ctx context.Con "vmScaleSetName": autorest.Encode("path", VMScaleSetName), } - const APIVersion = "2018-04-01" + const APIVersion = "2019-03-01" queryParameters := 
map[string]interface{}{ "api-version": APIVersion, } @@ -144,7 +144,7 @@ func (client VirtualMachineScaleSetVMsClient) DeletePreparer(ctx context.Context "vmScaleSetName": autorest.Encode("path", VMScaleSetName), } - const APIVersion = "2018-04-01" + const APIVersion = "2019-03-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -222,7 +222,7 @@ func (client VirtualMachineScaleSetVMsClient) GetPreparer(ctx context.Context, r "vmScaleSetName": autorest.Encode("path", VMScaleSetName), } - const APIVersion = "2018-04-01" + const APIVersion = "2019-03-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -291,7 +291,7 @@ func (client VirtualMachineScaleSetVMsClient) GetInstanceViewPreparer(ctx contex "vmScaleSetName": autorest.Encode("path", VMScaleSetName), } - const APIVersion = "2018-04-01" + const APIVersion = "2019-03-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -362,7 +362,7 @@ func (client VirtualMachineScaleSetVMsClient) ListPreparer(ctx context.Context, "virtualMachineScaleSetName": autorest.Encode("path", virtualMachineScaleSetName), } - const APIVersion = "2018-04-01" + const APIVersion = "2019-03-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -461,7 +461,7 @@ func (client VirtualMachineScaleSetVMsClient) PerformMaintenancePreparer(ctx con "vmScaleSetName": autorest.Encode("path", VMScaleSetName), } - const APIVersion = "2018-04-01" + const APIVersion = "2019-03-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -509,8 +509,11 @@ func (client VirtualMachineScaleSetVMsClient) PerformMaintenanceResponder(resp * // resourceGroupName - the name of the resource group. // VMScaleSetName - the name of the VM scale set. // instanceID - the instance ID of the virtual machine. 
-func (client VirtualMachineScaleSetVMsClient) PowerOff(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string) (result VirtualMachineScaleSetVMsPowerOffFuture, err error) { - req, err := client.PowerOffPreparer(ctx, resourceGroupName, VMScaleSetName, instanceID) +// skipShutdown - the parameter to request non-graceful VM shutdown. True value for this flag indicates +// non-graceful shutdown whereas false indicates otherwise. Default value for this flag is false if not +// specified +func (client VirtualMachineScaleSetVMsClient) PowerOff(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string, skipShutdown *bool) (result VirtualMachineScaleSetVMsPowerOffFuture, err error) { + req, err := client.PowerOffPreparer(ctx, resourceGroupName, VMScaleSetName, instanceID, skipShutdown) if err != nil { err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "PowerOff", nil, "Failure preparing request") return @@ -526,7 +529,7 @@ func (client VirtualMachineScaleSetVMsClient) PowerOff(ctx context.Context, reso } // PowerOffPreparer prepares the PowerOff request. 
-func (client VirtualMachineScaleSetVMsClient) PowerOffPreparer(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string) (*http.Request, error) { +func (client VirtualMachineScaleSetVMsClient) PowerOffPreparer(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string, skipShutdown *bool) (*http.Request, error) { pathParameters := map[string]interface{}{ "instanceId": autorest.Encode("path", instanceID), "resourceGroupName": autorest.Encode("path", resourceGroupName), @@ -534,10 +537,15 @@ func (client VirtualMachineScaleSetVMsClient) PowerOffPreparer(ctx context.Conte "vmScaleSetName": autorest.Encode("path", VMScaleSetName), } - const APIVersion = "2018-04-01" + const APIVersion = "2019-03-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } + if skipShutdown != nil { + queryParameters["skipShutdown"] = autorest.Encode("query", *skipShutdown) + } else { + queryParameters["skipShutdown"] = autorest.Encode("query", false) + } preparer := autorest.CreatePreparer( autorest.AsPost(), @@ -606,7 +614,7 @@ func (client VirtualMachineScaleSetVMsClient) RedeployPreparer(ctx context.Conte "vmScaleSetName": autorest.Encode("path", VMScaleSetName), } - const APIVersion = "2018-04-01" + const APIVersion = "2019-03-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -653,8 +661,9 @@ func (client VirtualMachineScaleSetVMsClient) RedeployResponder(resp *http.Respo // resourceGroupName - the name of the resource group. // VMScaleSetName - the name of the VM scale set. // instanceID - the instance ID of the virtual machine. 
-func (client VirtualMachineScaleSetVMsClient) Reimage(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string) (result VirtualMachineScaleSetVMsReimageFuture, err error) { - req, err := client.ReimagePreparer(ctx, resourceGroupName, VMScaleSetName, instanceID) +// VMScaleSetVMReimageInput - parameters for the Reimaging Virtual machine in ScaleSet. +func (client VirtualMachineScaleSetVMsClient) Reimage(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string, VMScaleSetVMReimageInput *VirtualMachineScaleSetVMReimageParameters) (result VirtualMachineScaleSetVMsReimageFuture, err error) { + req, err := client.ReimagePreparer(ctx, resourceGroupName, VMScaleSetName, instanceID, VMScaleSetVMReimageInput) if err != nil { err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "Reimage", nil, "Failure preparing request") return @@ -670,7 +679,7 @@ func (client VirtualMachineScaleSetVMsClient) Reimage(ctx context.Context, resou } // ReimagePreparer prepares the Reimage request. 
-func (client VirtualMachineScaleSetVMsClient) ReimagePreparer(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string) (*http.Request, error) { +func (client VirtualMachineScaleSetVMsClient) ReimagePreparer(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string, VMScaleSetVMReimageInput *VirtualMachineScaleSetVMReimageParameters) (*http.Request, error) { pathParameters := map[string]interface{}{ "instanceId": autorest.Encode("path", instanceID), "resourceGroupName": autorest.Encode("path", resourceGroupName), @@ -678,16 +687,21 @@ func (client VirtualMachineScaleSetVMsClient) ReimagePreparer(ctx context.Contex "vmScaleSetName": autorest.Encode("path", VMScaleSetName), } - const APIVersion = "2018-04-01" + const APIVersion = "2019-03-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), autorest.AsPost(), autorest.WithBaseURL(client.BaseURI), autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/virtualmachines/{instanceId}/reimage", pathParameters), autorest.WithQueryParameters(queryParameters)) + if VMScaleSetVMReimageInput != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithJSON(VMScaleSetVMReimageInput)) + } return preparer.Prepare((&http.Request{}).WithContext(ctx)) } @@ -751,7 +765,7 @@ func (client VirtualMachineScaleSetVMsClient) ReimageAllPreparer(ctx context.Con "vmScaleSetName": autorest.Encode("path", VMScaleSetName), } - const APIVersion = "2018-04-01" + const APIVersion = "2019-03-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -823,7 +837,7 @@ func (client VirtualMachineScaleSetVMsClient) RestartPreparer(ctx context.Contex "vmScaleSetName": autorest.Encode("path", VMScaleSetName), } - const APIVersion = 
"2018-04-01" + const APIVersion = "2019-03-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -902,7 +916,7 @@ func (client VirtualMachineScaleSetVMsClient) RunCommandPreparer(ctx context.Con "vmScaleSetName": autorest.Encode("path", VMScaleSetName), } - const APIVersion = "2018-04-01" + const APIVersion = "2019-03-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -977,7 +991,7 @@ func (client VirtualMachineScaleSetVMsClient) StartPreparer(ctx context.Context, "vmScaleSetName": autorest.Encode("path", VMScaleSetName), } - const APIVersion = "2018-04-01" + const APIVersion = "2019-03-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } @@ -1071,7 +1085,7 @@ func (client VirtualMachineScaleSetVMsClient) UpdatePreparer(ctx context.Context "vmScaleSetName": autorest.Encode("path", VMScaleSetName), } - const APIVersion = "2018-04-01" + const APIVersion = "2019-03-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-04-01/compute/virtualmachinesizes.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-03-01/compute/virtualmachinesizes.go similarity index 96% rename from vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-04-01/compute/virtualmachinesizes.go rename to vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-03-01/compute/virtualmachinesizes.go index d339b5dee2d0e..545a675342def 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-04-01/compute/virtualmachinesizes.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-03-01/compute/virtualmachinesizes.go @@ -40,7 +40,8 @@ func NewVirtualMachineSizesClientWithBaseURI(baseURI string, subscriptionID stri return VirtualMachineSizesClient{NewWithBaseURI(baseURI, subscriptionID)} } -// List lists all available virtual machine sizes for 
a subscription in a location. +// List this API is deprecated. Use [Resources +// Skus](https://docs.microsoft.com/en-us/rest/api/compute/resourceskus/list) // Parameters: // location - the location upon which virtual-machine-sizes is queried. func (client VirtualMachineSizesClient) List(ctx context.Context, location string) (result VirtualMachineSizeListResult, err error) { @@ -78,7 +79,7 @@ func (client VirtualMachineSizesClient) ListPreparer(ctx context.Context, locati "subscriptionId": autorest.Encode("path", client.SubscriptionID), } - const APIVersion = "2018-04-01" + const APIVersion = "2019-03-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/version/version.go b/vendor/github.com/Azure/azure-sdk-for-go/version/version.go index f0ba0326824e4..71a3d1a41c30f 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/version/version.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/version/version.go @@ -18,4 +18,4 @@ package version // Changes may cause incorrect behavior and will be lost if the code is regenerated. // Number contains the semantic version of this SDK. 
-const Number = "v19.0.0" +const Number = "v19.2.0" From fd5cb4139745dbd6934c629bfefa4112022eef06 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Osipiuk?= Date: Thu, 25 Apr 2019 12:45:44 +0200 Subject: [PATCH 76/96] Update Cluster Autoscaler to 1.12.5 --- cluster/gce/manifests/cluster-autoscaler.manifest | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cluster/gce/manifests/cluster-autoscaler.manifest b/cluster/gce/manifests/cluster-autoscaler.manifest index 8b09dbc14d965..43aa31cc72871 100644 --- a/cluster/gce/manifests/cluster-autoscaler.manifest +++ b/cluster/gce/manifests/cluster-autoscaler.manifest @@ -17,7 +17,7 @@ "containers": [ { "name": "cluster-autoscaler", - "image": "k8s.gcr.io/cluster-autoscaler:v1.12.3", + "image": "k8s.gcr.io/cluster-autoscaler:v1.12.5", "livenessProbe": { "httpGet": { "path": "/health-check", From 422643f888ed5818f58c70e154327b8b30fc8451 Mon Sep 17 00:00:00 2001 From: andyzhangx Date: Mon, 22 Apr 2019 06:41:45 +0000 Subject: [PATCH 77/96] add shareName param in azure file storage class skip create azure file if it exists remove comments --- pkg/cloudprovider/providers/azure/azure_file.go | 17 +++++------------ pkg/volume/azure_file/azure_provision.go | 17 +++++++++++------ 2 files changed, 16 insertions(+), 18 deletions(-) diff --git a/pkg/cloudprovider/providers/azure/azure_file.go b/pkg/cloudprovider/providers/azure/azure_file.go index 138dabb5765bd..4772aab257559 100644 --- a/pkg/cloudprovider/providers/azure/azure_file.go +++ b/pkg/cloudprovider/providers/azure/azure_file.go @@ -58,21 +58,14 @@ func (f *azureFileClient) createFileShare(accountName, accountKey, name string, if err != nil { return err } - // create a file share and set quota - // Note. Per https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Create-Share, - // setting x-ms-share-quota can set quota on the new share, but in reality, setting quota in CreateShare - // receives error "The metadata specified is invalid. 
It has characters that are not permitted." - // As a result,breaking into two API calls: create share and set quota share := fileClient.GetShareReference(name) - if err = share.Create(nil); err != nil { + share.Properties.Quota = sizeGiB + newlyCreated, err := share.CreateIfNotExists(nil) + if err != nil { return fmt.Errorf("failed to create file share, err: %v", err) } - share.Properties.Quota = sizeGiB - if err = share.SetProperties(nil); err != nil { - if err := share.Delete(nil); err != nil { - glog.Errorf("Error deleting share: %v", err) - } - return fmt.Errorf("failed to set quota on file share %s, err: %v", name, err) + if !newlyCreated { + glog.V(2).Infof("file share(%s) under account(%s) already exists", name, accountName) } return nil } diff --git a/pkg/volume/azure_file/azure_provision.go b/pkg/volume/azure_file/azure_provision.go index f1cb301ab12f5..2d5599617e922 100644 --- a/pkg/volume/azure_file/azure_provision.go +++ b/pkg/volume/azure_file/azure_provision.go @@ -139,11 +139,8 @@ func (a *azureFileProvisioner) Provision(selectedNode *v1.Node, allowedTopologie return nil, fmt.Errorf("%s does not support block volume provisioning", a.plugin.GetPluginName()) } - var sku, resourceGroup, location, account string + var sku, resourceGroup, location, account, shareName string - // File share name has a length limit of 63, and it cannot contain two consecutive '-'s. 
- name := util.GenerateVolumeName(a.options.ClusterName, a.options.PVName, 63) - name = strings.Replace(name, "--", "-", -1) capacity := a.options.PVC.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)] requestBytes := capacity.Value() requestGiB := int(util.RoundUpSize(requestBytes, 1024*1024*1024)) @@ -162,6 +159,8 @@ func (a *azureFileProvisioner) Provision(selectedNode *v1.Node, allowedTopologie secretNamespace = v case "resourcegroup": resourceGroup = v + case "sharename": + shareName = v default: return nil, fmt.Errorf("invalid option %q for volume plugin %s", k, a.plugin.GetPluginName()) } @@ -171,7 +170,13 @@ func (a *azureFileProvisioner) Provision(selectedNode *v1.Node, allowedTopologie return nil, fmt.Errorf("claim.Spec.Selector is not supported for dynamic provisioning on Azure file") } - account, key, err := a.azureProvider.CreateFileShare(name, account, sku, resourceGroup, location, requestGiB) + if shareName == "" { + // File share name has a length limit of 63, and it cannot contain two consecutive '-'s. + name := util.GenerateVolumeName(a.options.ClusterName, a.options.PVName, 63) + shareName = strings.Replace(name, "--", "-", -1) + } + + account, key, err := a.azureProvider.CreateFileShare(shareName, account, sku, resourceGroup, location, requestGiB) if err != nil { return nil, err } @@ -199,7 +204,7 @@ func (a *azureFileProvisioner) Provision(selectedNode *v1.Node, allowedTopologie PersistentVolumeSource: v1.PersistentVolumeSource{ AzureFile: &v1.AzureFilePersistentVolumeSource{ SecretName: secretName, - ShareName: name, + ShareName: shareName, SecretNamespace: &secretNamespace, }, }, From 5a71a71f19ef61fe169c4167b5e13f1f9fdcedaa Mon Sep 17 00:00:00 2001 From: Matt Matejczyk Date: Thu, 18 Apr 2019 17:05:33 +0200 Subject: [PATCH 78/96] Create the "internal" firewall rule for kubemark master. This is equivalent to the "internal" firewall rule that is created for the regular masters. 
The main reason for doing it is to allow prometheus scraping metrics from various kubemark master components, e.g. kubelet. Ref. https://github.com/kubernetes/perf-tests/issues/503 --- test/kubemark/gce/util.sh | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/test/kubemark/gce/util.sh b/test/kubemark/gce/util.sh index 51de5a9aa6863..a007cc5896419 100644 --- a/test/kubemark/gce/util.sh +++ b/test/kubemark/gce/util.sh @@ -102,6 +102,13 @@ function create-master-instance-with-resources { --target-tags "${MASTER_TAG}" \ --allow "tcp:443" & + run-gcloud-compute-with-retries firewall-rules create "${MASTER_NAME}-internal" \ + --project "${PROJECT}" \ + --network "${NETWORK}" \ + --source-ranges "10.0.0.0/8" \ + --target-tags "${MASTER_TAG}" \ + --allow "tcp:1-2379,tcp:2382-65535,udp:1-65535,icmp" & + wait } @@ -136,6 +143,10 @@ function delete-master-instance-and-resources { --project "${PROJECT}" \ --quiet || true + gcloud compute firewall-rules delete "${MASTER_NAME}-internal" \ + --project "${PROJECT}" \ + --quiet || true + if [ "${SEPARATE_EVENT_MACHINE:-false}" == "true" ]; then gcloud compute instances delete "${EVENT_STORE_NAME}" \ ${GCLOUD_COMMON_ARGS} || true From a1a0378c1aea8703fbd022ec4133decdb9f69004 Mon Sep 17 00:00:00 2001 From: andyzhangx Date: Sat, 13 Apr 2019 12:45:32 +0000 Subject: [PATCH 79/96] refactor detach azure disk retry operation --- .../azure/azure_controller_common.go | 24 ++++++++++++-- .../azure/azure_controller_standard.go | 32 ++++++------------- .../providers/azure/azure_controller_vmss.go | 30 +++++------------ .../providers/azure/azure_fakes.go | 4 +-- .../providers/azure/azure_vmsets.go | 6 ++-- pkg/volume/azure_dd/attacher.go | 2 +- pkg/volume/azure_dd/azure_dd.go | 2 +- 7 files changed, 46 insertions(+), 54 deletions(-) diff --git a/pkg/cloudprovider/providers/azure/azure_controller_common.go b/pkg/cloudprovider/providers/azure/azure_controller_common.go index d52d2555913a3..1175a5c620da2 100644 --- 
a/pkg/cloudprovider/providers/azure/azure_controller_common.go +++ b/pkg/cloudprovider/providers/azure/azure_controller_common.go @@ -95,14 +95,32 @@ func (c *controllerCommon) AttachDisk(isManagedDisk bool, diskName, diskURI stri return vmset.AttachDisk(isManagedDisk, diskName, diskURI, nodeName, lun, cachingMode) } -// DetachDiskByName detaches a vhd from host. The vhd can be identified by diskName or diskURI. -func (c *controllerCommon) DetachDiskByName(diskName, diskURI string, nodeName types.NodeName) error { +// DetachDisk detaches a disk from host. The vhd can be identified by diskName or diskURI. +func (c *controllerCommon) DetachDisk(diskName, diskURI string, nodeName types.NodeName) error { vmset, err := c.getNodeVMSet(nodeName) if err != nil { return err } - return vmset.DetachDiskByName(diskName, diskURI, nodeName) + resp, err := vmset.DetachDisk(diskName, diskURI, nodeName) + if c.cloud.CloudProviderBackoff && shouldRetryHTTPRequest(resp, err) { + glog.V(2).Infof("azureDisk - update backing off: detach disk(%s, %s), err: %v", diskName, diskURI, err) + retryErr := kwait.ExponentialBackoff(c.cloud.requestBackoff(), func() (bool, error) { + resp, err := vmset.DetachDisk(diskName, diskURI, nodeName) + return c.cloud.processHTTPRetryResponse(nil, "", resp, err) + }) + if retryErr != nil { + err = retryErr + glog.V(2).Infof("azureDisk - update abort backoff: detach disk(%s, %s), err: %v", diskName, diskURI, err) + } + } + if err != nil { + glog.Errorf("azureDisk - detach disk(%s, %s) failed, err: %v", diskName, diskURI, err) + } else { + glog.V(2).Infof("azureDisk - detach disk(%s, %s) succeeded", diskName, diskURI) + } + + return err } // getNodeDataDisks invokes vmSet interfaces to get data disks for the node. 
diff --git a/pkg/cloudprovider/providers/azure/azure_controller_standard.go b/pkg/cloudprovider/providers/azure/azure_controller_standard.go index 90c68da099802..59fdf02d6871c 100644 --- a/pkg/cloudprovider/providers/azure/azure_controller_standard.go +++ b/pkg/cloudprovider/providers/azure/azure_controller_standard.go @@ -18,6 +18,7 @@ package azure import ( "fmt" + "net/http" "strings" "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-03-01/compute" @@ -88,7 +89,7 @@ func (as *availabilitySet) AttachDisk(isManagedDisk bool, diskName, diskURI stri if strings.Contains(detail, errLeaseFailed) || strings.Contains(detail, errDiskBlobNotFound) { // if lease cannot be acquired or disk not found, immediately detach the disk and return the original error glog.V(2).Infof("azureDisk - err %v, try detach disk(%s, %s)", err, diskName, diskURI) - as.DetachDiskByName(diskName, diskURI, nodeName) + as.DetachDisk(diskName, diskURI, nodeName) } } else { glog.V(2).Infof("azureDisk - attach disk(%s, %s) succeeded", diskName, diskURI) @@ -96,20 +97,20 @@ func (as *availabilitySet) AttachDisk(isManagedDisk bool, diskName, diskURI stri return err } -// DetachDiskByName detaches a vhd from host +// DetachDisk detaches a disk from host // the vhd can be identified by diskName or diskURI -func (as *availabilitySet) DetachDiskByName(diskName, diskURI string, nodeName types.NodeName) error { +func (as *availabilitySet) DetachDisk(diskName, diskURI string, nodeName types.NodeName) (*http.Response, error) { vm, err := as.getVirtualMachine(nodeName) if err != nil { // if host doesn't exist, no need to detach - glog.Warningf("azureDisk - cannot find node %s, skip detaching disk %s", nodeName, diskName) - return nil + glog.Warningf("azureDisk - cannot find node %s, skip detaching disk(%s, %s)", nodeName, diskName, diskURI) + return nil, nil } vmName := mapNodeNameToVMName(nodeName) nodeResourceGroup, err := as.GetNodeResourceGroup(vmName) if err != nil { - return err + return nil, 
err } disks := *vm.StorageProfile.DataDisks @@ -127,7 +128,7 @@ func (as *availabilitySet) DetachDiskByName(diskName, diskURI string, nodeName t } if !bFoundDisk { - return fmt.Errorf("detach azure disk failure, disk %s not found, diskURI: %s", diskName, diskURI) + return nil, fmt.Errorf("detach azure disk failure, disk %s not found, diskURI: %s", diskName, diskURI) } newVM := compute.VirtualMachine{ @@ -146,22 +147,7 @@ func (as *availabilitySet) DetachDiskByName(diskName, diskURI string, nodeName t // Invalidate the cache right after updating defer as.cloud.vmCache.Delete(vmName) - resp, err := as.VirtualMachinesClient.CreateOrUpdate(ctx, nodeResourceGroup, vmName, newVM) - if as.CloudProviderBackoff && shouldRetryHTTPRequest(resp, err) { - glog.V(2).Infof("azureDisk - update(%s) backing off: vm(%s) detach disk(%s, %s), err: %v", nodeResourceGroup, vmName, diskName, diskURI, err) - retryErr := as.CreateOrUpdateVMWithRetry(nodeResourceGroup, vmName, newVM) - if retryErr != nil { - err = retryErr - glog.V(2).Infof("azureDisk - update(%s) abort backoff: vm(%s) detach disk(%s, %s), err: %v", nodeResourceGroup, vmName, diskName, diskURI, err) - } - } - if err != nil { - glog.Errorf("azureDisk - detach disk(%s, %s) failed, err: %v", diskName, diskURI, err) - } else { - glog.V(2).Infof("azureDisk - detach disk(%s, %s) succeeded", diskName, diskURI) - } - - return err + return as.VirtualMachinesClient.CreateOrUpdate(ctx, nodeResourceGroup, vmName, newVM) } // GetDataDisks gets a list of data disks attached to the node. 
diff --git a/pkg/cloudprovider/providers/azure/azure_controller_vmss.go b/pkg/cloudprovider/providers/azure/azure_controller_vmss.go index 485344641c424..2bf84c905e083 100644 --- a/pkg/cloudprovider/providers/azure/azure_controller_vmss.go +++ b/pkg/cloudprovider/providers/azure/azure_controller_vmss.go @@ -18,6 +18,7 @@ package azure import ( "fmt" + "net/http" "strings" "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-03-01/compute" @@ -93,7 +94,7 @@ func (ss *scaleSet) AttachDisk(isManagedDisk bool, diskName, diskURI string, nod if strings.Contains(detail, errLeaseFailed) || strings.Contains(detail, errDiskBlobNotFound) { // if lease cannot be acquired or disk not found, immediately detach the disk and return the original error glog.Infof("azureDisk - err %s, try detach disk(%s, %s)", detail, diskName, diskURI) - ss.DetachDiskByName(diskName, diskURI, nodeName) + ss.DetachDisk(diskName, diskURI, nodeName) } } else { glog.V(2).Infof("azureDisk - attach disk(%s, %s) succeeded", diskName, diskURI) @@ -101,18 +102,18 @@ func (ss *scaleSet) AttachDisk(isManagedDisk bool, diskName, diskURI string, nod return err } -// DetachDiskByName detaches a vhd from host +// DetachDisk detaches a disk from host // the vhd can be identified by diskName or diskURI -func (ss *scaleSet) DetachDiskByName(diskName, diskURI string, nodeName types.NodeName) error { +func (ss *scaleSet) DetachDisk(diskName, diskURI string, nodeName types.NodeName) (*http.Response, error) { vmName := mapNodeNameToVMName(nodeName) ssName, instanceID, vm, err := ss.getVmssVM(vmName) if err != nil { - return err + return nil, err } nodeResourceGroup, err := ss.GetNodeResourceGroup(vmName) if err != nil { - return err + return nil, err } disks := []compute.DataDisk{} @@ -133,7 +134,7 @@ func (ss *scaleSet) DetachDiskByName(diskName, diskURI string, nodeName types.No } if !bFoundDisk { - return fmt.Errorf("detach azure disk failure, disk %s not found, diskURI: %s", diskName, diskURI) + return nil, 
fmt.Errorf("detach azure disk failure, disk %s not found, diskURI: %s", diskName, diskURI) } newVM := compute.VirtualMachineScaleSetVM{ @@ -156,22 +157,7 @@ func (ss *scaleSet) DetachDiskByName(diskName, diskURI string, nodeName types.No defer ss.vmssVMCache.Delete(key) glog.V(2).Infof("azureDisk - update(%s): vm(%s) - detach disk(%s, %s)", nodeResourceGroup, nodeName, diskName, diskURI) - resp, err := ss.VirtualMachineScaleSetVMsClient.Update(ctx, nodeResourceGroup, ssName, instanceID, newVM) - if ss.CloudProviderBackoff && shouldRetryHTTPRequest(resp, err) { - glog.V(2).Infof("azureDisk - update(%s) backing off: vm(%s) detach disk(%s, %s), err: %v", nodeResourceGroup, nodeName, diskName, diskURI, err) - retryErr := ss.UpdateVmssVMWithRetry(ctx, nodeResourceGroup, ssName, instanceID, newVM) - if retryErr != nil { - err = retryErr - glog.V(2).Infof("azureDisk - update(%s) abort backoff: vm(%s) detach disk(%s, %s), err: %v", nodeResourceGroup, nodeName, diskName, diskURI, err) - } - } - if err != nil { - glog.Errorf("azureDisk - detach disk(%s, %s) from %s failed, err: %v", diskName, diskURI, nodeName, err) - } else { - glog.V(2).Infof("azureDisk - detach disk(%s, %s) succeeded", diskName, diskURI) - } - - return err + return ss.VirtualMachineScaleSetVMsClient.Update(ctx, nodeResourceGroup, ssName, instanceID, newVM) } // GetDataDisks gets a list of data disks attached to the node. 
diff --git a/pkg/cloudprovider/providers/azure/azure_fakes.go b/pkg/cloudprovider/providers/azure/azure_fakes.go index 10c4192a5361c..3854c1ecefa0f 100644 --- a/pkg/cloudprovider/providers/azure/azure_fakes.go +++ b/pkg/cloudprovider/providers/azure/azure_fakes.go @@ -914,8 +914,8 @@ func (f *fakeVMSet) AttachDisk(isManagedDisk bool, diskName, diskURI string, nod return fmt.Errorf("unimplemented") } -func (f *fakeVMSet) DetachDiskByName(diskName, diskURI string, nodeName types.NodeName) error { - return fmt.Errorf("unimplemented") +func (f *fakeVMSet) DetachDisk(diskName, diskURI string, nodeName types.NodeName) (*http.Response, error) { + return nil, fmt.Errorf("unimplemented") } func (f *fakeVMSet) GetDataDisks(nodeName types.NodeName) ([]compute.DataDisk, error) { diff --git a/pkg/cloudprovider/providers/azure/azure_vmsets.go b/pkg/cloudprovider/providers/azure/azure_vmsets.go index 93686702f5c80..70fcf85c3a73e 100644 --- a/pkg/cloudprovider/providers/azure/azure_vmsets.go +++ b/pkg/cloudprovider/providers/azure/azure_vmsets.go @@ -17,6 +17,8 @@ limitations under the License. package azure import ( + "net/http" + "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-03-01/compute" "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2017-09-01/network" @@ -63,8 +65,8 @@ type VMSet interface { // AttachDisk attaches a vhd to vm. The vhd must exist, can be identified by diskName, diskURI, and lun. AttachDisk(isManagedDisk bool, diskName, diskURI string, nodeName types.NodeName, lun int32, cachingMode compute.CachingTypes) error - // DetachDiskByName detaches a vhd from host. The vhd can be identified by diskName or diskURI. - DetachDiskByName(diskName, diskURI string, nodeName types.NodeName) error + // DetachDisk detaches a vhd from host. The vhd can be identified by diskName or diskURI. + DetachDisk(diskName, diskURI string, nodeName types.NodeName) (*http.Response, error) // GetDataDisks gets a list of data disks attached to the node. 
GetDataDisks(nodeName types.NodeName) ([]compute.DataDisk, error) diff --git a/pkg/volume/azure_dd/attacher.go b/pkg/volume/azure_dd/attacher.go index 419a4838e0cdb..f6b1d36a101d7 100644 --- a/pkg/volume/azure_dd/attacher.go +++ b/pkg/volume/azure_dd/attacher.go @@ -301,7 +301,7 @@ func (d *azureDiskDetacher) Detach(diskURI string, nodeName types.NodeName) erro getLunMutex.LockKey(instanceid) defer getLunMutex.UnlockKey(instanceid) - err = diskController.DetachDiskByName("", diskURI, nodeName) + err = diskController.DetachDisk("", diskURI, nodeName) if err != nil { glog.Errorf("failed to detach azure disk %q, err %v", diskURI, err) } diff --git a/pkg/volume/azure_dd/azure_dd.go b/pkg/volume/azure_dd/azure_dd.go index 7c58808b03c96..9b3fdc32d13a1 100644 --- a/pkg/volume/azure_dd/azure_dd.go +++ b/pkg/volume/azure_dd/azure_dd.go @@ -46,7 +46,7 @@ type DiskController interface { // Attaches the disk to the host machine. AttachDisk(isManagedDisk bool, diskName, diskUri string, nodeName types.NodeName, lun int32, cachingMode compute.CachingTypes) error // Detaches the disk, identified by disk name or uri, from the host machine. 
- DetachDiskByName(diskName, diskUri string, nodeName types.NodeName) error + DetachDisk(diskName, diskUri string, nodeName types.NodeName) error // Check if a list of volumes are attached to the node with the specified NodeName DisksAreAttached(diskNames []string, nodeName types.NodeName) (map[string]bool, error) From 7047d3701847f5a0fc9b28fe481dc503af26bf07 Mon Sep 17 00:00:00 2001 From: andyzhangx Date: Sun, 14 Apr 2019 03:09:06 +0000 Subject: [PATCH 80/96] move disk lock process to azure cloud provider fix comments fix import keymux check error add unit test for attach/detach disk funcs fix bazel issue rebase --- pkg/cloudprovider/providers/azure/BUILD | 2 + .../azure/azure_controller_common.go | 39 ++++++++++- .../azure/azure_controller_common_test.go | 66 +++++++++++++++++++ pkg/volume/azure_dd/BUILD | 1 - pkg/volume/azure_dd/attacher.go | 40 ++--------- pkg/volume/azure_dd/azure_dd.go | 2 +- 6 files changed, 111 insertions(+), 39 deletions(-) create mode 100644 pkg/cloudprovider/providers/azure/azure_controller_common_test.go diff --git a/pkg/cloudprovider/providers/azure/BUILD b/pkg/cloudprovider/providers/azure/BUILD index d0da37b0309f2..945dbbb8ee64c 100644 --- a/pkg/cloudprovider/providers/azure/BUILD +++ b/pkg/cloudprovider/providers/azure/BUILD @@ -41,6 +41,7 @@ go_library( "//pkg/cloudprovider/providers/azure/auth:go_default_library", "//pkg/controller:go_default_library", "//pkg/kubelet/apis:go_default_library", + "//pkg/util/keymutex:go_default_library", "//pkg/version:go_default_library", "//pkg/volume:go_default_library", "//pkg/volume/util:go_default_library", @@ -79,6 +80,7 @@ go_test( srcs = [ "azure_backoff_test.go", "azure_cache_test.go", + "azure_controller_common_test.go", "azure_instances_test.go", "azure_loadbalancer_test.go", "azure_metrics_test.go", diff --git a/pkg/cloudprovider/providers/azure/azure_controller_common.go b/pkg/cloudprovider/providers/azure/azure_controller_common.go index 1175a5c620da2..f223ac8e46b1f 100644 --- 
a/pkg/cloudprovider/providers/azure/azure_controller_common.go +++ b/pkg/cloudprovider/providers/azure/azure_controller_common.go @@ -17,6 +17,7 @@ limitations under the License. package azure import ( + "context" "fmt" "time" @@ -26,6 +27,7 @@ import ( "k8s.io/apimachinery/pkg/types" kwait "k8s.io/apimachinery/pkg/util/wait" "k8s.io/kubernetes/pkg/cloudprovider" + "k8s.io/kubernetes/pkg/util/keymutex" ) const ( @@ -50,6 +52,9 @@ var defaultBackOff = kwait.Backoff{ Jitter: 0.0, } +// acquire lock to attach/detach disk in one node +var diskOpMutex = keymutex.NewHashed(0) + type controllerCommon struct { subscriptionID string location string @@ -85,13 +90,29 @@ func (c *controllerCommon) getNodeVMSet(nodeName types.NodeName) (VMSet, error) return ss, nil } -// AttachDisk attaches a vhd to vm. The vhd must exist, can be identified by diskName, diskURI, and lun. -func (c *controllerCommon) AttachDisk(isManagedDisk bool, diskName, diskURI string, nodeName types.NodeName, lun int32, cachingMode compute.CachingTypes) error { +// AttachDisk attaches a vhd to vm. The vhd must exist, can be identified by diskName, diskURI. 
+func (c *controllerCommon) AttachDisk(isManagedDisk bool, diskName, diskURI string, nodeName types.NodeName, cachingMode compute.CachingTypes) error { vmset, err := c.getNodeVMSet(nodeName) if err != nil { return err } + instanceid, err := c.cloud.InstanceID(context.TODO(), nodeName) + if err != nil { + glog.Warningf("failed to get azure instance id (%v)", err) + return fmt.Errorf("failed to get azure instance id for node %q (%v)", nodeName, err) + } + + diskOpMutex.LockKey(instanceid) + defer diskOpMutex.UnlockKey(instanceid) + + lun, err := c.GetNextDiskLun(nodeName) + if err != nil { + glog.Warningf("no LUN available for instance %q (%v)", nodeName, err) + return fmt.Errorf("all LUNs are used, cannot attach volume (%s, %s) to instance %q (%v)", diskName, diskURI, instanceid, err) + } + + glog.V(2).Infof("Trying to attach volume %q lun %d to node %q.", diskURI, lun, nodeName) return vmset.AttachDisk(isManagedDisk, diskName, diskURI, nodeName, lun, cachingMode) } @@ -102,11 +123,25 @@ func (c *controllerCommon) DetachDisk(diskName, diskURI string, nodeName types.N return err } + instanceid, err := c.cloud.InstanceID(context.TODO(), nodeName) + if err != nil { + glog.Warningf("failed to get azure instance id (%v)", err) + return fmt.Errorf("failed to get azure instance id for node %q (%v)", nodeName, err) + } + + glog.V(2).Infof("detach %v from node %q", diskURI, nodeName) + + // make the lock here as small as possible + diskOpMutex.LockKey(instanceid) resp, err := vmset.DetachDisk(diskName, diskURI, nodeName) + diskOpMutex.UnlockKey(instanceid) + if c.cloud.CloudProviderBackoff && shouldRetryHTTPRequest(resp, err) { glog.V(2).Infof("azureDisk - update backing off: detach disk(%s, %s), err: %v", diskName, diskURI, err) retryErr := kwait.ExponentialBackoff(c.cloud.requestBackoff(), func() (bool, error) { + diskOpMutex.LockKey(instanceid) resp, err := vmset.DetachDisk(diskName, diskURI, nodeName) + diskOpMutex.UnlockKey(instanceid) return 
c.cloud.processHTTPRetryResponse(nil, "", resp, err) }) if retryErr != nil { diff --git a/pkg/cloudprovider/providers/azure/azure_controller_common_test.go b/pkg/cloudprovider/providers/azure/azure_controller_common_test.go new file mode 100644 index 0000000000000..c6bd2807bce78 --- /dev/null +++ b/pkg/cloudprovider/providers/azure/azure_controller_common_test.go @@ -0,0 +1,66 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package azure + +import ( + "fmt" + "testing" + + "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-03-01/compute" +) + +func TestAttachDisk(t *testing.T) { + c := getTestCloud() + + common := &controllerCommon{ + location: c.Location, + storageEndpointSuffix: c.Environment.StorageEndpointSuffix, + resourceGroup: c.ResourceGroup, + subscriptionID: c.SubscriptionID, + cloud: c, + } + + diskURI := fmt.Sprintf("/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Compute/disks/disk-name", c.SubscriptionID, c.ResourceGroup) + + err := common.AttachDisk(true, "", diskURI, "node1", compute.CachingTypesReadOnly) + if err != nil { + fmt.Printf("TestAttachDisk return expected error: %v", err) + } else { + t.Errorf("TestAttachDisk unexpected nil err") + } +} + +func TestDetachDisk(t *testing.T) { + c := getTestCloud() + + common := &controllerCommon{ + location: c.Location, + storageEndpointSuffix: c.Environment.StorageEndpointSuffix, + resourceGroup: c.ResourceGroup, + subscriptionID: c.SubscriptionID, + 
cloud: c, + } + + diskURI := fmt.Sprintf("/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Compute/disks/disk-name", c.SubscriptionID, c.ResourceGroup) + + err := common.DetachDisk("", diskURI, "node1") + if err != nil { + fmt.Printf("TestDetachDisk return expected error: %v", err) + } else { + t.Errorf("TestDetachDisk unexpected nil err") + } +} diff --git a/pkg/volume/azure_dd/BUILD b/pkg/volume/azure_dd/BUILD index 2aaa227e48f45..e32fb132848f0 100644 --- a/pkg/volume/azure_dd/BUILD +++ b/pkg/volume/azure_dd/BUILD @@ -26,7 +26,6 @@ go_library( "//pkg/cloudprovider/providers/azure:go_default_library", "//pkg/features:go_default_library", "//pkg/kubelet/apis:go_default_library", - "//pkg/util/keymutex:go_default_library", "//pkg/util/mount:go_default_library", "//pkg/util/strings:go_default_library", "//pkg/volume:go_default_library", diff --git a/pkg/volume/azure_dd/attacher.go b/pkg/volume/azure_dd/attacher.go index f6b1d36a101d7..5b4fa13e2fe10 100644 --- a/pkg/volume/azure_dd/attacher.go +++ b/pkg/volume/azure_dd/attacher.go @@ -17,7 +17,6 @@ limitations under the License. 
package azure_dd import ( - "context" "fmt" "os" "path/filepath" @@ -33,7 +32,6 @@ import ( "k8s.io/apimachinery/pkg/util/wait" "k8s.io/kubernetes/pkg/cloudprovider" "k8s.io/kubernetes/pkg/cloudprovider/providers/azure" - "k8s.io/kubernetes/pkg/util/keymutex" "k8s.io/kubernetes/pkg/util/mount" "k8s.io/kubernetes/pkg/volume" "k8s.io/kubernetes/pkg/volume/util" @@ -55,9 +53,6 @@ var _ volume.Detacher = &azureDiskDetacher{} var _ volume.DeviceMounter = &azureDiskAttacher{} var _ volume.DeviceUnmounter = &azureDiskDetacher{} -// acquire lock to get an lun number -var getLunMutex = keymutex.NewHashed(0) - // Attach attaches a volume.Spec to an Azure VM referenced by NodeName, returning the disk's LUN func (a *azureDiskAttacher) Attach(spec *volume.Spec, nodeName types.NodeName) (string, error) { volumeSource, _, err := getVolumeSource(spec) @@ -66,12 +61,6 @@ func (a *azureDiskAttacher) Attach(spec *volume.Spec, nodeName types.NodeName) ( return "", err } - instanceid, err := a.cloud.InstanceID(context.TODO(), nodeName) - if err != nil { - glog.Warningf("failed to get azure instance id (%v)", err) - return "", fmt.Errorf("failed to get azure instance id for node %q (%v)", nodeName, err) - } - diskController, err := getDiskController(a.plugin.host) if err != nil { return "", err @@ -82,30 +71,22 @@ func (a *azureDiskAttacher) Attach(spec *volume.Spec, nodeName types.NodeName) ( // Log error and continue with attach glog.Warningf( "Error checking if volume is already attached to current node (%q). Will continue and try attach anyway. err=%v", - instanceid, err) + nodeName, err) } if err == nil { // Volume is already attached to node. - glog.V(2).Infof("Attach operation is successful. volume %q is already attached to node %q at lun %d.", volumeSource.DiskName, instanceid, lun) + glog.V(2).Infof("Attach operation is successful. volume %q is already attached to node %q at lun %d.", volumeSource.DiskName, nodeName, lun) } else { glog.V(2).Infof("GetDiskLun returned: %v. 
Initiating attaching volume %q to node %q.", err, volumeSource.DataDiskURI, nodeName) - getLunMutex.LockKey(instanceid) - defer getLunMutex.UnlockKey(instanceid) - lun, err = diskController.GetNextDiskLun(nodeName) - if err != nil { - glog.Warningf("no LUN available for instance %q (%v)", nodeName, err) - return "", fmt.Errorf("all LUNs are used, cannot attach volume %q to instance %q (%v)", volumeSource.DiskName, instanceid, err) - } - glog.V(2).Infof("Trying to attach volume %q lun %d to node %q.", volumeSource.DataDiskURI, lun, nodeName) isManagedDisk := (*volumeSource.Kind == v1.AzureManagedDisk) - err = diskController.AttachDisk(isManagedDisk, volumeSource.DiskName, volumeSource.DataDiskURI, nodeName, lun, compute.CachingTypes(*volumeSource.CachingMode)) + err = diskController.AttachDisk(isManagedDisk, volumeSource.DiskName, volumeSource.DataDiskURI, nodeName, compute.CachingTypes(*volumeSource.CachingMode)) if err == nil { glog.V(2).Infof("Attach operation successful: volume %q attached to node %q.", volumeSource.DataDiskURI, nodeName) } else { - glog.V(2).Infof("Attach volume %q to instance %q failed with %v", volumeSource.DataDiskURI, instanceid, err) - return "", fmt.Errorf("Attach volume %q to instance %q failed with %v", volumeSource.DiskName, instanceid, err) + glog.V(2).Infof("Attach volume %q to instance %q failed with %v", volumeSource.DataDiskURI, nodeName, err) + return "", fmt.Errorf("Attach volume %q to instance %q failed with %v", volumeSource.DiskName, nodeName, err) } } @@ -285,22 +266,11 @@ func (d *azureDiskDetacher) Detach(diskURI string, nodeName types.NodeName) erro return fmt.Errorf("invalid disk to detach: %q", diskURI) } - instanceid, err := d.cloud.InstanceID(context.TODO(), nodeName) - if err != nil { - glog.Warningf("no instance id for node %q, skip detaching (%v)", nodeName, err) - return nil - } - - glog.V(2).Infof("detach %v from node %q", diskURI, nodeName) - diskController, err := getDiskController(d.plugin.host) if err != nil 
{ return err } - getLunMutex.LockKey(instanceid) - defer getLunMutex.UnlockKey(instanceid) - err = diskController.DetachDisk("", diskURI, nodeName) if err != nil { glog.Errorf("failed to detach azure disk %q, err %v", diskURI, err) diff --git a/pkg/volume/azure_dd/azure_dd.go b/pkg/volume/azure_dd/azure_dd.go index 9b3fdc32d13a1..622649a567634 100644 --- a/pkg/volume/azure_dd/azure_dd.go +++ b/pkg/volume/azure_dd/azure_dd.go @@ -44,7 +44,7 @@ type DiskController interface { DeleteManagedDisk(diskURI string) error // Attaches the disk to the host machine. - AttachDisk(isManagedDisk bool, diskName, diskUri string, nodeName types.NodeName, lun int32, cachingMode compute.CachingTypes) error + AttachDisk(isManagedDisk bool, diskName, diskUri string, nodeName types.NodeName, cachingMode compute.CachingTypes) error // Detaches the disk, identified by disk name or uri, from the host machine. DetachDisk(diskName, diskUri string, nodeName types.NodeName) error From 3187e4a31dbb7587371386de746c47f42c4848f0 Mon Sep 17 00:00:00 2001 From: andyzhangx Date: Sun, 28 Apr 2019 11:56:59 +0000 Subject: [PATCH 81/96] fix disk list corruption issue --- .../providers/azure/azure_controller_standard.go | 8 ++++++-- .../providers/azure/azure_controller_vmss.go | 6 ++++-- 2 files changed, 10 insertions(+), 4 deletions(-) diff --git a/pkg/cloudprovider/providers/azure/azure_controller_standard.go b/pkg/cloudprovider/providers/azure/azure_controller_standard.go index 59fdf02d6871c..f748c1cd018c0 100644 --- a/pkg/cloudprovider/providers/azure/azure_controller_standard.go +++ b/pkg/cloudprovider/providers/azure/azure_controller_standard.go @@ -41,7 +41,9 @@ func (as *availabilitySet) AttachDisk(isManagedDisk bool, diskName, diskURI stri return err } - disks := *vm.StorageProfile.DataDisks + disks := make([]compute.DataDisk, len(*vm.StorageProfile.DataDisks)) + copy(disks, *vm.StorageProfile.DataDisks) + if isManagedDisk { disks = append(disks, compute.DataDisk{ @@ -113,7 +115,9 @@ func (as 
*availabilitySet) DetachDisk(diskName, diskURI string, nodeName types.N return nil, err } - disks := *vm.StorageProfile.DataDisks + disks := make([]compute.DataDisk, len(*vm.StorageProfile.DataDisks)) + copy(disks, *vm.StorageProfile.DataDisks) + bFoundDisk := false for i, disk := range disks { if disk.Lun != nil && (disk.Name != nil && diskName != "" && *disk.Name == diskName) || diff --git a/pkg/cloudprovider/providers/azure/azure_controller_vmss.go b/pkg/cloudprovider/providers/azure/azure_controller_vmss.go index 2bf84c905e083..13fcb9341ad78 100644 --- a/pkg/cloudprovider/providers/azure/azure_controller_vmss.go +++ b/pkg/cloudprovider/providers/azure/azure_controller_vmss.go @@ -43,7 +43,8 @@ func (ss *scaleSet) AttachDisk(isManagedDisk bool, diskName, diskURI string, nod disks := []compute.DataDisk{} if vm.StorageProfile != nil && vm.StorageProfile.DataDisks != nil { - disks = *vm.StorageProfile.DataDisks + disks = make([]compute.DataDisk, len(*vm.StorageProfile.DataDisks)) + copy(disks, *vm.StorageProfile.DataDisks) } if isManagedDisk { disks = append(disks, @@ -118,7 +119,8 @@ func (ss *scaleSet) DetachDisk(diskName, diskURI string, nodeName types.NodeName disks := []compute.DataDisk{} if vm.StorageProfile != nil && vm.StorageProfile.DataDisks != nil { - disks = *vm.StorageProfile.DataDisks + disks = make([]compute.DataDisk, len(*vm.StorageProfile.DataDisks)) + copy(disks, *vm.StorageProfile.DataDisks) } bFoundDisk := false for i, disk := range disks { From 44785f9399c1ac5279d5c33339920ad3b16a1a3c Mon Sep 17 00:00:00 2001 From: Michael Taufen Date: Mon, 29 Apr 2019 16:26:27 -0700 Subject: [PATCH 82/96] Fix verify godeps failure for 1.12 github.com/evanphx/json-patch added a new tag at the same sha this morning: https://github.com/evanphx/json-patch/releases/tag/v4.2.0 This confused godeps. This PR updates our file to match godeps expectation. 
Fixes issue 77238 --- Godeps/Godeps.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Godeps/Godeps.json b/Godeps/Godeps.json index ac4dcbba1ad64..50d49c2f90f9b 100644 --- a/Godeps/Godeps.json +++ b/Godeps/Godeps.json @@ -1543,7 +1543,7 @@ }, { "ImportPath": "github.com/evanphx/json-patch", - "Comment": "v4.1.0-19-g5858425", + "Comment": "v4.2.0", "Rev": "5858425f75500d40c52783dce87d085a483ce135" }, { From ea6597884ee10039f8d903b4356540bdf6ed2e7a Mon Sep 17 00:00:00 2001 From: Ling Huang Date: Mon, 29 Apr 2019 15:07:10 -0400 Subject: [PATCH 83/96] Upgrade Stackdriver Logging Agent addon image from 1.6.0 to 1.6.8. --- cluster/gce/config-default.sh | 2 +- cluster/gce/config-test.sh | 2 +- cluster/gce/gci/configure-helper.sh | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/cluster/gce/config-default.sh b/cluster/gce/config-default.sh index e65f3bf298179..ccb37b08f74e3 100755 --- a/cluster/gce/config-default.sh +++ b/cluster/gce/config-default.sh @@ -409,7 +409,7 @@ fi # Fluentd requirements # YAML exists to trigger a configuration refresh when changes are made. FLUENTD_GCP_YAML_VERSION="v3.2.0" -FLUENTD_GCP_VERSION="${FLUENTD_GCP_VERSION:-0.6-1.6.0-1}" +FLUENTD_GCP_VERSION="${FLUENTD_GCP_VERSION:-1.6.8}" FLUENTD_GCP_MEMORY_LIMIT="${FLUENTD_GCP_MEMORY_LIMIT:-}" FLUENTD_GCP_CPU_REQUEST="${FLUENTD_GCP_CPU_REQUEST:-}" FLUENTD_GCP_MEMORY_REQUEST="${FLUENTD_GCP_MEMORY_REQUEST:-}" diff --git a/cluster/gce/config-test.sh b/cluster/gce/config-test.sh index 5d69e19f9f38c..26755d781558c 100755 --- a/cluster/gce/config-test.sh +++ b/cluster/gce/config-test.sh @@ -421,7 +421,7 @@ fi # Fluentd requirements # YAML exists to trigger a configuration refresh when changes are made. 
FLUENTD_GCP_YAML_VERSION="v3.2.0" -FLUENTD_GCP_VERSION="${FLUENTD_GCP_VERSION:-0.6-1.6.0-1}" +FLUENTD_GCP_VERSION="${FLUENTD_GCP_VERSION:-1.6.8}" FLUENTD_GCP_MEMORY_LIMIT="${FLUENTD_GCP_MEMORY_LIMIT:-}" FLUENTD_GCP_CPU_REQUEST="${FLUENTD_GCP_CPU_REQUEST:-}" FLUENTD_GCP_MEMORY_REQUEST="${FLUENTD_GCP_MEMORY_REQUEST:-}" diff --git a/cluster/gce/gci/configure-helper.sh b/cluster/gce/gci/configure-helper.sh index c8100b28671f8..3deae533bccfb 100644 --- a/cluster/gce/gci/configure-helper.sh +++ b/cluster/gce/gci/configure-helper.sh @@ -2318,7 +2318,7 @@ function setup-fluentd { fluentd_gcp_yaml_version="${FLUENTD_GCP_YAML_VERSION:-v3.2.0}" sed -i -e "s@{{ fluentd_gcp_yaml_version }}@${fluentd_gcp_yaml_version}@g" "${fluentd_gcp_yaml}" sed -i -e "s@{{ fluentd_gcp_yaml_version }}@${fluentd_gcp_yaml_version}@g" "${fluentd_gcp_scaler_yaml}" - fluentd_gcp_version="${FLUENTD_GCP_VERSION:-0.6-1.6.0-1}" + fluentd_gcp_version="${FLUENTD_GCP_VERSION:-1.6.8}" sed -i -e "s@{{ fluentd_gcp_version }}@${fluentd_gcp_version}@g" "${fluentd_gcp_yaml}" update-daemon-set-prometheus-to-sd-parameters ${fluentd_gcp_yaml} start-fluentd-resource-update ${fluentd_gcp_yaml} From 13f82a4cfdc7508c517b8dd21c5616f8ca30a522 Mon Sep 17 00:00:00 2001 From: Tim Allclair Date: Fri, 12 Apr 2019 18:37:53 -0700 Subject: [PATCH 84/96] Test kubectl cp escape --- pkg/kubectl/cmd/cp_test.go | 192 ++++++++++++++++++++++++++++++++++++- 1 file changed, 191 insertions(+), 1 deletion(-) diff --git a/pkg/kubectl/cmd/cp_test.go b/pkg/kubectl/cmd/cp_test.go index 7be8cd0344dbe..4e414654341b7 100644 --- a/pkg/kubectl/cmd/cp_test.go +++ b/pkg/kubectl/cmd/cp_test.go @@ -29,6 +29,7 @@ import ( "strings" "testing" + "github.com/stretchr/testify/require" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/runtime" @@ -565,7 +566,6 @@ func TestBadTar(t *testing.T) { t.Errorf("Error finding file: %v", err) } } - } func TestClean(t *testing.T) { @@ -692,3 +692,193 @@ func TestValidate(t *testing.T) 
{ }) } } + +func TestUntar(t *testing.T) { + testdir, err := ioutil.TempDir("", "test-untar") + require.NoError(t, err) + defer os.RemoveAll(testdir) + t.Logf("Test base: %s", testdir) + + const ( + dest = "base" + ) + + type file struct { + path string + linkTarget string // For link types + expected string // Expect to find the file here (or not, if empty) + } + files := []file{{ + // Absolute file within dest + path: filepath.Join(testdir, dest, "abs"), + expected: filepath.Join(testdir, dest, testdir, dest, "abs"), + }, { // Absolute file outside dest + path: filepath.Join(testdir, "abs-out"), + expected: filepath.Join(testdir, dest, testdir, "abs-out"), + }, { // Absolute nested file within dest + path: filepath.Join(testdir, dest, "nested/nest-abs"), + expected: filepath.Join(testdir, dest, testdir, dest, "nested/nest-abs"), + }, { // Absolute nested file outside dest + path: filepath.Join(testdir, dest, "nested/../../nest-abs-out"), + expected: filepath.Join(testdir, dest, testdir, "nest-abs-out"), + }, { // Relative file inside dest + path: "relative", + expected: filepath.Join(testdir, dest, "relative"), + }, { // Relative file outside dest + path: "../unrelative", + expected: filepath.Join(testdir, dest, "unrelative"), + }, { // Nested relative file inside dest + path: "nested/nest-rel", + expected: filepath.Join(testdir, dest, "nested/nest-rel"), + }, { // Nested relative file outside dest + path: "nested/../../nest-unrelative", + expected: filepath.Join(testdir, dest, "nest-unrelative"), + }} + + mkExpectation := func(expected, suffix string) string { + if expected == "" { + return "" + } + return expected + suffix + } + links := []file{} + for _, f := range files { + links = append(links, file{ + path: f.path + "-innerlink", + linkTarget: "link-target", + expected: mkExpectation(f.expected, "-innerlink"), + }, file{ + path: f.path + "-innerlink-abs", + linkTarget: filepath.Join(testdir, dest, "link-target"), + expected: "", + }, file{ + path: f.path + 
"-outerlink", + linkTarget: filepath.Join(backtick(f.path), "link-target"), + expected: "", + }, file{ + path: f.path + "-outerlink-abs", + linkTarget: filepath.Join(testdir, "link-target"), + expected: "", + }) + } + files = append(files, links...) + + // Test back-tick escaping through a symlink. + files = append(files, + file{ + path: "nested/again/back-link", + linkTarget: "../../nested", + expected: filepath.Join(testdir, dest, "nested/again/back-link"), + }, + file{ + path: "nested/again/back-link/../../../back-link-file", + expected: filepath.Join(testdir, dest, "back-link-file"), + }) + + // Test chaining back-tick symlinks. + files = append(files, + file{ + path: "nested/back-link-first", + linkTarget: "../", + expected: filepath.Join(testdir, dest, "nested/back-link-first"), + }, + file{ + path: "nested/back-link-first/back-link-second", + linkTarget: "../", + expected: filepath.Join(testdir, dest, "back-link-second"), + }, + file{ + path: "nested/back-link-first/back-link-second/back-link-term", + }) + + buf := &bytes.Buffer{} + tw := tar.NewWriter(buf) + expectations := map[string]bool{} + for _, f := range files { + if f.expected != "" { + expectations[f.expected] = false + } + if f.linkTarget == "" { + hdr := &tar.Header{ + Name: f.path, + Mode: 0666, + Size: int64(len(f.path)), + } + require.NoError(t, tw.WriteHeader(hdr), f.path) + _, err := tw.Write([]byte(f.path)) + require.NoError(t, err, f.path) + } else { + hdr := &tar.Header{ + Name: f.path, + Mode: int64(0777 | os.ModeSymlink), + Typeflag: tar.TypeSymlink, + Linkname: f.linkTarget, + } + require.NoError(t, tw.WriteHeader(hdr), f.path) + } + } + tw.Close() + + opts := NewCopyOptions(genericclioptions.NewTestIOStreamsDiscard()) + + require.NoError(t, opts.untarAll(buf, filepath.Join(testdir, dest), "")) + + filepath.Walk(testdir, func(path string, info os.FileInfo, err error) error { + if err != nil { + return err + } + if info.IsDir() { + return nil // Ignore directories. 
+ } + if _, ok := expectations[path]; !ok { + t.Errorf("Unexpected file at %s", path) + } else { + expectations[path] = true + } + return nil + }) + for path, found := range expectations { + if !found { + t.Errorf("Missing expected file %s", path) + } + } +} + +// backtick returns a path to one directory up from the target +func backtick(target string) string { + rel, _ := filepath.Rel(filepath.Dir(target), "../") + return rel +} + +func createTmpFile(t *testing.T, filepath, data string) { + f, err := os.Create(filepath) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + defer f.Close() + if _, err := io.Copy(f, bytes.NewBuffer([]byte(data))); err != nil { + t.Fatalf("unexpected error: %v", err) + } + if err := f.Close(); err != nil { + t.Fatal(err) + } +} + +func cmpFileData(t *testing.T, filePath, data string) { + f, err := os.Open(filePath) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + + defer f.Close() + buff := &bytes.Buffer{} + if _, err := io.Copy(buff, f); err != nil { + t.Fatal(err) + } + if err := f.Close(); err != nil { + t.Fatal(err) + } + if data != string(buff.Bytes()) { + t.Fatalf("expected: %s, saw: %s", data, string(buff.Bytes())) + } +} From 8649b4d9ebad4e3b6ae01619585237ec5863b92b Mon Sep 17 00:00:00 2001 From: Maciej Szulik Date: Tue, 16 Apr 2019 15:49:16 +0200 Subject: [PATCH 85/96] Properly handle links in tar --- pkg/kubectl/cmd/BUILD | 1 + pkg/kubectl/cmd/cp.go | 100 ++++++++------ pkg/kubectl/cmd/cp_test.go | 273 +++++++++++++++++++++---------------- 3 files changed, 209 insertions(+), 165 deletions(-) diff --git a/pkg/kubectl/cmd/BUILD b/pkg/kubectl/cmd/BUILD index 885485458dc3e..7d1727447f9be 100644 --- a/pkg/kubectl/cmd/BUILD +++ b/pkg/kubectl/cmd/BUILD @@ -234,6 +234,7 @@ go_test( "//staging/src/k8s.io/metrics/pkg/client/clientset/versioned/fake:go_default_library", "//vendor/github.com/googleapis/gnostic/OpenAPIv2:go_default_library", "//vendor/github.com/spf13/cobra:go_default_library", + 
"//vendor/github.com/stretchr/testify/require:go_default_library", "//vendor/gopkg.in/yaml.v2:go_default_library", "//vendor/k8s.io/utils/exec:go_default_library", ], diff --git a/pkg/kubectl/cmd/cp.go b/pkg/kubectl/cmd/cp.go index 768c6b71d679d..f4c945499e861 100644 --- a/pkg/kubectl/cmd/cp.go +++ b/pkg/kubectl/cmd/cp.go @@ -401,9 +401,7 @@ func clean(fileName string) string { return path.Clean(string(os.PathSeparator) + fileName) } -func (o *CopyOptions) untarAll(reader io.Reader, destFile, prefix string) error { - entrySeq := -1 - +func (o *CopyOptions) untarAll(reader io.Reader, destDir, prefix string) error { // TODO: use compression here? tarReader := tar.NewReader(reader) for { @@ -414,52 +412,60 @@ func (o *CopyOptions) untarAll(reader io.Reader, destFile, prefix string) error } break } - entrySeq++ - mode := header.FileInfo().Mode() - // all the files will start with the prefix, which is the directory where + + // All the files will start with the prefix, which is the directory where // they were located on the pod, we need to strip down that prefix, but - // if the prefix is missing it means the tar was tempered with + // if the prefix is missing it means the tar was tempered with. + // For the case where prefix is empty we need to ensure that the path + // is not absolute, which also indicates the tar file was tempered with. 
if !strings.HasPrefix(header.Name, prefix) { return fmt.Errorf("tar contents corrupted") } - outFileName := path.Join(destFile, clean(header.Name[len(prefix):])) - baseName := path.Dir(outFileName) + + // basic file information + mode := header.FileInfo().Mode() + destFileName := path.Join(destDir, header.Name[len(prefix):]) + baseName := path.Dir(destFileName) + if err := os.MkdirAll(baseName, 0755); err != nil { return err } if header.FileInfo().IsDir() { - if err := os.MkdirAll(outFileName, 0755); err != nil { + if err := os.MkdirAll(destFileName, 0755); err != nil { return err } continue } - // handle coping remote file into local directory - if entrySeq == 0 && !header.FileInfo().IsDir() { - exists, err := dirExists(outFileName) - if err != nil { - return err - } - if exists { - outFileName = filepath.Join(outFileName, path.Base(clean(header.Name))) - } + // We need to ensure that the destination file is always within boundries + // of the destination directory. This prevents any kind of path traversal + // from within tar archive. + dir, file := filepath.Split(destFileName) + evaledPath, err := filepath.EvalSymlinks(dir) + if err != nil { + return err + } + // For scrutiny we verify both the actual destination as well as we follow + // all the links that might lead outside of the destination directory. 
+ if !isDestRelative(destDir, destFileName) || !isDestRelative(destDir, filepath.Join(evaledPath, file)) { + fmt.Fprintf(o.IOStreams.ErrOut, "warning: link %q is pointing to %q which is outside target destination, skipping\n", destFileName, header.Linkname) + continue } if mode&os.ModeSymlink != 0 { linkname := header.Linkname - // error is returned if linkname can't be made relative to destFile, - // but relative can end up being ../dir that's why we also need to - // verify if relative path is the same after Clean-ing - relative, err := filepath.Rel(destFile, linkname) - if path.IsAbs(linkname) && (err != nil || relative != stripPathShortcuts(relative)) { - fmt.Fprintf(o.IOStreams.ErrOut, "warning: link %q is pointing to %q which is outside target destination, skipping\n", outFileName, header.Linkname) + // We need to ensure that the link destination is always within boundries + // of the destination directory. This prevents any kind of path traversal + // from within tar archive. + if !isDestRelative(destDir, linkJoin(destFileName, linkname)) { + fmt.Fprintf(o.IOStreams.ErrOut, "warning: link %q is pointing to %q which is outside target destination, skipping\n", destFileName, header.Linkname) continue } - if err := os.Symlink(linkname, outFileName); err != nil { + if err := os.Symlink(linkname, destFileName); err != nil { return err } } else { - outFile, err := os.Create(outFileName) + outFile, err := os.Create(destFileName) if err != nil { return err } @@ -473,14 +479,32 @@ func (o *CopyOptions) untarAll(reader io.Reader, destFile, prefix string) error } } - if entrySeq == -1 { - //if no file was copied - errInfo := fmt.Sprintf("error: %s no such file or directory", prefix) - return errors.New(errInfo) - } return nil } +// linkJoin joins base and link to get the final path to be created. +// It will consider whether link is an absolute path or not when returning result. 
+func linkJoin(base, link string) string { + if filepath.IsAbs(link) { + return link + } + return filepath.Join(base, link) +} + +// isDestRelative returns true if dest is pointing outside the base directory, +// false otherwise. +func isDestRelative(base, dest string) bool { + fullPath := dest + if !filepath.IsAbs(dest) { + fullPath = filepath.Join(base, dest) + } + relative, err := filepath.Rel(base, fullPath) + if err != nil { + return false + } + return relative == "." || relative == stripPathShortcuts(relative) +} + func getPrefix(file string) string { // tar strips the leading '/' if it's there, so we will too return strings.TrimLeft(file, "/") @@ -507,15 +531,3 @@ func (o *CopyOptions) execute(options *ExecOptions) error { } return nil } - -// dirExists checks if a path exists and is a directory. -func dirExists(path string) (bool, error) { - fi, err := os.Stat(path) - if err == nil && fi.IsDir() { - return true, nil - } - if os.IsNotExist(err) { - return false, nil - } - return false, err -} diff --git a/pkg/kubectl/cmd/cp_test.go b/pkg/kubectl/cmd/cp_test.go index 4e414654341b7..0615163bf4ad9 100644 --- a/pkg/kubectl/cmd/cp_test.go +++ b/pkg/kubectl/cmd/cp_test.go @@ -19,17 +19,18 @@ package cmd import ( "archive/tar" "bytes" + "fmt" "io" "io/ioutil" "net/http" "os" - "os/exec" "path" "path/filepath" "strings" "testing" "github.com/stretchr/testify/require" + "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/runtime" @@ -128,6 +129,128 @@ func TestGetPrefix(t *testing.T) { } } +func TestStripPathShortcuts(t *testing.T) { + tests := []struct { + name string + input string + expected string + }{ + { + name: "test single path shortcut prefix", + input: "../foo/bar", + expected: "foo/bar", + }, + { + name: "test multiple path shortcuts", + input: "../../foo/bar", + expected: "foo/bar", + }, + { + name: "test multiple path shortcuts with absolute path", + input: "/tmp/one/two/../../foo/bar", + expected: "tmp/foo/bar", + }, + { + 
name: "test multiple path shortcuts with no named directory", + input: "../../", + expected: "", + }, + { + name: "test multiple path shortcuts with no named directory and no trailing slash", + input: "../..", + expected: "", + }, + { + name: "test multiple path shortcuts with absolute path and filename containing leading dots", + input: "/tmp/one/two/../../foo/..bar", + expected: "tmp/foo/..bar", + }, + { + name: "test multiple path shortcuts with no named directory and filename containing leading dots", + input: "../...foo", + expected: "...foo", + }, + { + name: "test filename containing leading dots", + input: "...foo", + expected: "...foo", + }, + { + name: "test root directory", + input: "/", + expected: "", + }, + } + + for _, test := range tests { + out := stripPathShortcuts(test.input) + if out != test.expected { + t.Errorf("expected: %s, saw: %s", test.expected, out) + } + } +} +func TestIsDestRelative(t *testing.T) { + tests := []struct { + base string + dest string + relative bool + }{ + { + base: "/dir", + dest: "../link", + relative: false, + }, + { + base: "/dir", + dest: "../../link", + relative: false, + }, + { + base: "/dir", + dest: "/link", + relative: false, + }, + { + base: "/dir", + dest: "link", + relative: true, + }, + { + base: "/dir", + dest: "int/file/link", + relative: true, + }, + { + base: "/dir", + dest: "int/../link", + relative: true, + }, + { + base: "/dir", + dest: "/dir/link", + relative: true, + }, + { + base: "/dir", + dest: "/dir/int/../link", + relative: true, + }, + { + base: "/dir", + dest: "/dir/../../link", + relative: false, + }, + } + + for i, test := range tests { + t.Run(fmt.Sprintf("%d", i), func(t *testing.T) { + if test.relative != isDestRelative(test.base, test.dest) { + t.Errorf("unexpected result for: base %q, dest %q", test.base, test.dest) + } + }) + } +} + func checkErr(t *testing.T, err error) { if err != nil { t.Errorf("unexpected error: %v", err) @@ -324,118 +447,6 @@ func TestTarUntarWrongPrefix(t 
*testing.T) { } } -// TestCopyToLocalFileOrDir tests untarAll in two cases : -// 1: copy pod file to local file -// 2: copy pod file into local directory -func TestCopyToLocalFileOrDir(t *testing.T) { - dir, err := ioutil.TempDir(os.TempDir(), "input") - dir2, err2 := ioutil.TempDir(os.TempDir(), "output") - if err != nil || err2 != nil { - t.Errorf("unexpected error: %v | %v", err, err2) - t.FailNow() - } - defer func() { - if err := os.RemoveAll(dir); err != nil { - t.Errorf("Unexpected error cleaning up: %v", err) - } - if err := os.RemoveAll(dir2); err != nil { - t.Errorf("Unexpected error cleaning up: %v", err) - } - }() - - files := []struct { - name string - data string - dest string - destDirExists bool - }{ - { - name: "foo", - data: "foobarbaz", - dest: "path/to/dest", - destDirExists: false, - }, - { - name: "dir/blah", - data: "bazblahfoo", - dest: "dest/file/path", - destDirExists: true, - }, - } - - for _, file := range files { - func() { - // setup - srcFilePath := filepath.Join(dir, file.name) - destPath := filepath.Join(dir2, file.dest) - if err := os.MkdirAll(filepath.Dir(srcFilePath), 0755); err != nil { - t.Errorf("unexpected error: %v", err) - t.FailNow() - } - srcFile, err := os.Create(srcFilePath) - if err != nil { - t.Errorf("unexpected error: %v", err) - t.FailNow() - } - defer srcFile.Close() - - if _, err := io.Copy(srcFile, bytes.NewBuffer([]byte(file.data))); err != nil { - t.Errorf("unexpected error: %v", err) - t.FailNow() - } - if file.destDirExists { - if err := os.MkdirAll(destPath, 0755); err != nil { - t.Errorf("unexpected error: %v", err) - t.FailNow() - } - } - - // start tests - srcTarFilePath := filepath.Join(dir, file.name+".tar") - // here use tar command to create tar file instead of calling makeTar func - // because makeTar func can not generate correct header name - err = exec.Command("tar", "cf", srcTarFilePath, srcFilePath).Run() - if err != nil { - t.Errorf("unexpected error: %v", err) - t.FailNow() - } - srcTarFile, 
err := os.Open(srcTarFilePath) - if err != nil { - t.Errorf("unexpected error: %v", err) - t.FailNow() - } - defer srcTarFile.Close() - - opts := NewCopyOptions(genericclioptions.NewTestIOStreamsDiscard()) - if err := opts.untarAll(srcTarFile, destPath, getPrefix(srcFilePath)); err != nil { - t.Errorf("unexpected error: %v", err) - t.FailNow() - } - - actualDestFilePath := destPath - if file.destDirExists { - actualDestFilePath = filepath.Join(destPath, filepath.Base(srcFilePath)) - } - _, err = os.Stat(actualDestFilePath) - if err != nil && os.IsNotExist(err) { - t.Errorf("expecting %s exists, but actually it's missing", actualDestFilePath) - } - destFile, err := os.Open(actualDestFilePath) - if err != nil { - t.Errorf("unexpected error: %v", err) - t.FailNow() - } - defer destFile.Close() - buff := &bytes.Buffer{} - io.Copy(buff, destFile) - if file.data != string(buff.Bytes()) { - t.Errorf("expected: %s, actual: %s", file.data, string(buff.Bytes())) - } - }() - } - -} - func TestTarDestinationName(t *testing.T) { dir, err := ioutil.TempDir(os.TempDir(), "input") dir2, err2 := ioutil.TempDir(os.TempDir(), "output") @@ -531,7 +542,6 @@ func TestBadTar(t *testing.T) { name string body string }{ - {"/prefix/../../../tmp/foo", "Up to temp"}, {"/prefix/foo/bar/../../home/bburns/names.txt", "Down and back"}, } for _, file := range files { @@ -726,13 +736,13 @@ func TestUntar(t *testing.T) { expected: filepath.Join(testdir, dest, "relative"), }, { // Relative file outside dest path: "../unrelative", - expected: filepath.Join(testdir, dest, "unrelative"), + expected: "", }, { // Nested relative file inside dest path: "nested/nest-rel", expected: filepath.Join(testdir, dest, "nested/nest-rel"), }, { // Nested relative file outside dest path: "nested/../../nest-unrelative", - expected: filepath.Join(testdir, dest, "nest-unrelative"), + expected: "", }} mkExpectation := func(expected, suffix string) string { @@ -741,6 +751,13 @@ func TestUntar(t *testing.T) { } return 
expected + suffix } + mkBacktickExpectation := func(expected, suffix string) string { + dir, _ := filepath.Split(filepath.Clean(expected)) + if len(strings.Split(dir, string(os.PathSeparator))) <= 1 { + return "" + } + return expected + suffix + } links := []file{} for _, f := range files { links = append(links, file{ @@ -750,11 +767,11 @@ func TestUntar(t *testing.T) { }, file{ path: f.path + "-innerlink-abs", linkTarget: filepath.Join(testdir, dest, "link-target"), - expected: "", + expected: mkExpectation(f.expected, "-innerlink-abs"), }, file{ path: f.path + "-outerlink", linkTarget: filepath.Join(backtick(f.path), "link-target"), - expected: "", + expected: mkBacktickExpectation(f.expected, "-outerlink"), }, file{ path: f.path + "-outerlink-abs", linkTarget: filepath.Join(testdir, "link-target"), @@ -788,7 +805,19 @@ func TestUntar(t *testing.T) { expected: filepath.Join(testdir, dest, "back-link-second"), }, file{ - path: "nested/back-link-first/back-link-second/back-link-term", + // This case is chaining together symlinks that step back, so that + // if you just look at the target relative to the path it appears + // inside the destination directory, but if you actually follow each + // step of the path you end up outside the destination directory. 
+ path: "nested/back-link-first/back-link-second/back-link-term", + linkTarget: "", + expected: "", + }) + + files = append(files, + file{ // Relative directory path with terminating / + path: "direct/dir/", + expected: "", }) buf := &bytes.Buffer{} @@ -805,8 +834,10 @@ func TestUntar(t *testing.T) { Size: int64(len(f.path)), } require.NoError(t, tw.WriteHeader(hdr), f.path) - _, err := tw.Write([]byte(f.path)) - require.NoError(t, err, f.path) + if !strings.HasSuffix(f.path, "/") { + _, err := tw.Write([]byte(f.path)) + require.NoError(t, err, f.path) + } } else { hdr := &tar.Header{ Name: f.path, From b587ea232d1d0d71134da419993b52ca5e5472e9 Mon Sep 17 00:00:00 2001 From: Wei Huang Date: Thu, 11 Oct 2018 11:39:57 -0700 Subject: [PATCH 86/96] use k8s.gcr.io/pause instead of kubernetes/pause --- test/e2e/scheduling/taints_test.go | 6 +++--- test/utils/runners.go | 4 ++-- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/test/e2e/scheduling/taints_test.go b/test/e2e/scheduling/taints_test.go index 7f582b8f927ca..52b13eaa82b33 100644 --- a/test/e2e/scheduling/taints_test.go +++ b/test/e2e/scheduling/taints_test.go @@ -61,7 +61,7 @@ func createPodForTaintsTest(hasToleration bool, tolerationSeconds int, podName, Containers: []v1.Container{ { Name: "pause", - Image: "kubernetes/pause", + Image: "k8s.gcr.io/pause:3.1", }, }, }, @@ -80,7 +80,7 @@ func createPodForTaintsTest(hasToleration bool, tolerationSeconds int, podName, Containers: []v1.Container{ { Name: "pause", - Image: "kubernetes/pause", + Image: "k8s.gcr.io/pause:3.1", }, }, Tolerations: []v1.Toleration{{Key: "kubernetes.io/e2e-evict-taint-key", Value: "evictTaintVal", Effect: v1.TaintEffectNoExecute}}, @@ -99,7 +99,7 @@ func createPodForTaintsTest(hasToleration bool, tolerationSeconds int, podName, Containers: []v1.Container{ { Name: "pause", - Image: "kubernetes/pause", + Image: "k8s.gcr.io/pause:3.1", }, }, // default - tolerate forever diff --git a/test/utils/runners.go b/test/utils/runners.go 
index 01fb6a0d24c7a..8ca297cc21902 100644 --- a/test/utils/runners.go +++ b/test/utils/runners.go @@ -1026,7 +1026,7 @@ func MakePodSpec() v1.PodSpec { return v1.PodSpec{ Containers: []v1.Container{{ Name: "pause", - Image: "kubernetes/pause", + Image: "k8s.gcr.io/pause:3.1", Ports: []v1.ContainerPort{{ContainerPort: 80}}, Resources: v1.ResourceRequirements{ Limits: v1.ResourceList{ @@ -1253,7 +1253,7 @@ type DaemonConfig struct { func (config *DaemonConfig) Run() error { if config.Image == "" { - config.Image = "kubernetes/pause" + config.Image = "k8s.gcr.io/pause:3.1" } nameLabel := map[string]string{ "name": config.Name + "-daemon", From ae38c81d165bd8e462f73e34e5052812729bdf71 Mon Sep 17 00:00:00 2001 From: Marek Siarkowicz Date: Thu, 18 Apr 2019 11:52:51 +0200 Subject: [PATCH 87/96] Pick up security patches for fluentd-gcp-scaler by upgrading to version 0.5.2 --- cluster/addons/fluentd-gcp/scaler-deployment.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cluster/addons/fluentd-gcp/scaler-deployment.yaml b/cluster/addons/fluentd-gcp/scaler-deployment.yaml index a111a91be7e38..77287ca4ff282 100644 --- a/cluster/addons/fluentd-gcp/scaler-deployment.yaml +++ b/cluster/addons/fluentd-gcp/scaler-deployment.yaml @@ -19,7 +19,7 @@ spec: serviceAccountName: fluentd-gcp-scaler containers: - name: fluentd-gcp-scaler - image: k8s.gcr.io/fluentd-gcp-scaler:0.5.1 + image: k8s.gcr.io/fluentd-gcp-scaler:0.5.2 command: - /scaler.sh - --ds-name=fluentd-gcp-{{ fluentd_gcp_yaml_version }} From bbe788c9b95f416c29b85af48ea640c109768248 Mon Sep 17 00:00:00 2001 From: Ryan McNamara Date: Tue, 16 Apr 2019 12:55:15 -0700 Subject: [PATCH 88/96] Error when etcd3 watch finds delete event with nil prevKV --- .../k8s.io/apiserver/pkg/storage/etcd3/BUILD | 4 + .../apiserver/pkg/storage/etcd3/event.go | 10 +- .../apiserver/pkg/storage/etcd3/event_test.go | 110 ++++++++++++++++++ .../apiserver/pkg/storage/etcd3/watcher.go | 8 +- 4 files changed, 129 insertions(+), 3 
deletions(-) create mode 100644 staging/src/k8s.io/apiserver/pkg/storage/etcd3/event_test.go diff --git a/staging/src/k8s.io/apiserver/pkg/storage/etcd3/BUILD b/staging/src/k8s.io/apiserver/pkg/storage/etcd3/BUILD index 6853f0f54725a..ecf7f76a4ef27 100644 --- a/staging/src/k8s.io/apiserver/pkg/storage/etcd3/BUILD +++ b/staging/src/k8s.io/apiserver/pkg/storage/etcd3/BUILD @@ -10,6 +10,7 @@ go_test( name = "go_default_test", srcs = [ "compact_test.go", + "event_test.go", "lease_manager_test.go", "store_test.go", "watcher_test.go", @@ -37,7 +38,10 @@ go_test( "//vendor/github.com/coreos/etcd/clientv3:go_default_library", "//vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes:go_default_library", "//vendor/github.com/coreos/etcd/integration:go_default_library", + "//vendor/github.com/coreos/etcd/mvcc/mvccpb:go_default_library", "//vendor/github.com/coreos/pkg/capnslog:go_default_library", + "//vendor/github.com/stretchr/testify/assert:go_default_library", + "//vendor/github.com/stretchr/testify/require:go_default_library", ], ) diff --git a/staging/src/k8s.io/apiserver/pkg/storage/etcd3/event.go b/staging/src/k8s.io/apiserver/pkg/storage/etcd3/event.go index 7dc9175bcf874..dbaf785b26145 100644 --- a/staging/src/k8s.io/apiserver/pkg/storage/etcd3/event.go +++ b/staging/src/k8s.io/apiserver/pkg/storage/etcd3/event.go @@ -17,6 +17,7 @@ limitations under the License. package etcd3 import ( + "fmt" "github.com/coreos/etcd/clientv3" "github.com/coreos/etcd/mvcc/mvccpb" ) @@ -42,7 +43,12 @@ func parseKV(kv *mvccpb.KeyValue) *event { } } -func parseEvent(e *clientv3.Event) *event { +func parseEvent(e *clientv3.Event) (*event, error) { + if !e.IsCreate() && e.PrevKv == nil { + // If the previous value is nil, error. One example of how this is possible is if the previous value has been compacted already. 
+ return nil, fmt.Errorf("etcd event received with PrevKv=nil (key=%q, modRevision=%d, type=%s)", string(e.Kv.Key), e.Kv.ModRevision, e.Type.String()) + + } ret := &event{ key: string(e.Kv.Key), value: e.Kv.Value, @@ -53,5 +59,5 @@ func parseEvent(e *clientv3.Event) *event { if e.PrevKv != nil { ret.prevValue = e.PrevKv.Value } - return ret + return ret, nil } diff --git a/staging/src/k8s.io/apiserver/pkg/storage/etcd3/event_test.go b/staging/src/k8s.io/apiserver/pkg/storage/etcd3/event_test.go new file mode 100644 index 0000000000000..0bbcac3296010 --- /dev/null +++ b/staging/src/k8s.io/apiserver/pkg/storage/etcd3/event_test.go @@ -0,0 +1,110 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package etcd3 + +import ( + "github.com/coreos/etcd/clientv3" + "github.com/coreos/etcd/mvcc/mvccpb" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "testing" +) + +func TestParseEvent(t *testing.T) { + for _, tc := range []struct { + name string + etcdEvent *clientv3.Event + expectedEvent *event + expectedErr string + }{ + { + name: "successful create", + etcdEvent: &clientv3.Event{ + Type: clientv3.EventTypePut, + PrevKv: nil, + Kv: &mvccpb.KeyValue{ + // key is the key in bytes. An empty key is not allowed. 
+ Key: []byte("key"), + ModRevision: 1, + CreateRevision: 1, + Value: []byte("value"), + }, + }, + expectedEvent: &event{ + key: "key", + value: []byte("value"), + prevValue: nil, + rev: 1, + isDeleted: false, + isCreated: true, + }, + expectedErr: "", + }, + { + name: "unsuccessful delete", + etcdEvent: &clientv3.Event{ + Type: mvccpb.DELETE, + PrevKv: nil, + Kv: &mvccpb.KeyValue{ + Key: []byte("key"), + CreateRevision: 1, + ModRevision: 2, + Value: nil, + }, + }, + expectedErr: "etcd event received with PrevKv=nil", + }, + { + name: "successful delete", + etcdEvent: &clientv3.Event{ + Type: mvccpb.DELETE, + PrevKv: &mvccpb.KeyValue{ + Key: []byte("key"), + CreateRevision: 1, + ModRevision: 1, + Value: []byte("value"), + }, + Kv: &mvccpb.KeyValue{ + Key: []byte("key"), + CreateRevision: 1, + ModRevision: 2, + Value: nil, + }, + }, + expectedEvent: &event{ + key: "key", + value: nil, + prevValue: []byte("value"), + rev: 2, + isDeleted: true, + isCreated: false, + }, + expectedErr: "", + }, + } { + t.Run(tc.name, func(t *testing.T) { + actualEvent, err := parseEvent(tc.etcdEvent) + if tc.expectedErr != "" { + require.Error(t, err) + assert.Contains(t, err.Error(), tc.expectedErr) + } else { + require.NoError(t, err) + assert.Equal(t, tc.expectedEvent, actualEvent) + } + }) + } +} diff --git a/staging/src/k8s.io/apiserver/pkg/storage/etcd3/watcher.go b/staging/src/k8s.io/apiserver/pkg/storage/etcd3/watcher.go index c1216d5884cdd..90c5d7760ba3d 100644 --- a/staging/src/k8s.io/apiserver/pkg/storage/etcd3/watcher.go +++ b/staging/src/k8s.io/apiserver/pkg/storage/etcd3/watcher.go @@ -210,7 +210,13 @@ func (wc *watchChan) startWatching(watchClosedCh chan struct{}) { return } for _, e := range wres.Events { - wc.sendEvent(parseEvent(e)) + parsedEvent, err := parseEvent(e) + if err != nil { + glog.Errorf("watch chan error: %v", err) + wc.sendError(err) + return + } + wc.sendEvent(parsedEvent) } } // When we come to this point, it's only possible that client side ends the 
watch. From 3ca22799965dc95bbca7f3220760600f47cf81df Mon Sep 17 00:00:00 2001 From: Dan Mace Date: Mon, 15 Oct 2018 15:31:25 -0400 Subject: [PATCH 89/96] Make CreatePrivilegedPSPBinding reentrant Make CreatePrivilegedPSPBinding reentrant so tests using it (e.g. DNS) can be executed more than once against a cluster. Without this change, such tests will fail because the PSP already exists, short circuiting test setup. --- test/e2e/framework/psp_util.go | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/test/e2e/framework/psp_util.go b/test/e2e/framework/psp_util.go index cd281f00d542d..30ba939095d5f 100644 --- a/test/e2e/framework/psp_util.go +++ b/test/e2e/framework/psp_util.go @@ -113,7 +113,9 @@ func CreatePrivilegedPSPBinding(f *Framework, namespace string) { psp := PrivilegedPSP(podSecurityPolicyPrivileged) psp, err = f.ClientSet.ExtensionsV1beta1().PodSecurityPolicies().Create(psp) - ExpectNoError(err, "Failed to create PSP %s", podSecurityPolicyPrivileged) + if !apierrs.IsAlreadyExists(err) { + ExpectNoError(err, "Failed to create PSP %s", podSecurityPolicyPrivileged) + } if IsRBACEnabled(f) { // Create the Role to bind it to the namespace. 
@@ -126,7 +128,9 @@ func CreatePrivilegedPSPBinding(f *Framework, namespace string) { Verbs: []string{"use"}, }}, }) - ExpectNoError(err, "Failed to create PSP role") + if !apierrs.IsAlreadyExists(err) { + ExpectNoError(err, "Failed to create PSP role") + } } }) From 6733165f255e12bbb7faa59e831fc318bd226c6f Mon Sep 17 00:00:00 2001 From: Yassine TIJANI Date: Thu, 9 May 2019 14:53:38 +0200 Subject: [PATCH 90/96] check if Memory is not nil for container stats --- pkg/kubelet/stats/helper.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/kubelet/stats/helper.go b/pkg/kubelet/stats/helper.go index 6bb7a6f3db391..5501f3cc956a5 100644 --- a/pkg/kubelet/stats/helper.go +++ b/pkg/kubelet/stats/helper.go @@ -52,7 +52,7 @@ func cadvisorInfoToCPUandMemoryStats(info *cadvisorapiv2.ContainerInfo) (*statsa cpuStats.UsageCoreNanoSeconds = &cstat.Cpu.Usage.Total } } - if info.Spec.HasMemory { + if info.Spec.HasMemory && cstat.Memory != nil { pageFaults := cstat.Memory.ContainerData.Pgfault majorPageFaults := cstat.Memory.ContainerData.Pgmajfault memoryStats = &statsapi.MemoryStats{ From 65a1ffb2c774719a2c65747299410eb3448aeec0 Mon Sep 17 00:00:00 2001 From: Chao Xu Date: Wed, 8 May 2019 15:05:07 -0700 Subject: [PATCH 91/96] In GuaranteedUpdate, retry on any error if we are working with stale data --- .../registry/generic/registry/store_test.go | 44 +++++++++++++++++++ .../apiserver/pkg/storage/etcd3/store.go | 23 +++++----- 2 files changed, 56 insertions(+), 11 deletions(-) diff --git a/staging/src/k8s.io/apiserver/pkg/registry/generic/registry/store_test.go b/staging/src/k8s.io/apiserver/pkg/registry/generic/registry/store_test.go index bdc06164d3d90..6694337afd6dc 100644 --- a/staging/src/k8s.io/apiserver/pkg/registry/generic/registry/store_test.go +++ b/staging/src/k8s.io/apiserver/pkg/registry/generic/registry/store_test.go @@ -2108,3 +2108,47 @@ func TestDeletionFinalizersForGarbageCollection(t *testing.T) { } } } + +type staleGuaranteedUpdateStorage 
struct { + storage.Interface + cachedObj runtime.Object +} + +// GuaranteedUpdate overwrites the method with one that always suggests the cachedObj. +func (s *staleGuaranteedUpdateStorage) GuaranteedUpdate( + ctx context.Context, key string, ptrToType runtime.Object, ignoreNotFound bool, + preconditions *storage.Preconditions, tryUpdate storage.UpdateFunc, _ ...runtime.Object) error { + return s.Interface.GuaranteedUpdate(ctx, key, ptrToType, ignoreNotFound, preconditions, tryUpdate, s.cachedObj) +} + +func TestDeleteWithCachedObject(t *testing.T) { + podName := "foo" + podWithFinalizer := &example.Pod{ + ObjectMeta: metav1.ObjectMeta{Name: podName, Finalizers: []string{"foo.com/x"}}, + Spec: example.PodSpec{NodeName: "machine"}, + } + podWithNoFinalizer := &example.Pod{ + ObjectMeta: metav1.ObjectMeta{Name: podName}, + Spec: example.PodSpec{NodeName: "machine"}, + } + ctx := genericapirequest.WithNamespace(genericapirequest.NewContext(), "test") + destroyFunc, registry := newTestGenericStoreRegistry(t, scheme, false) + defer destroyFunc() + // cached object does not have any finalizer. + registry.Storage.Storage = &staleGuaranteedUpdateStorage{Interface: registry.Storage.Storage, cachedObj: podWithNoFinalizer} + // created object with pending finalizer. + _, err := registry.Create(ctx, podWithFinalizer, rest.ValidateAllObjectFunc, &metav1.CreateOptions{}) + if err != nil { + t.Fatal(err) + } + // The object shouldn't be deleted, because the persisted object has pending finalizers. 
+ _, _, err = registry.Delete(ctx, podName, nil) + if err != nil { + t.Fatal(err) + } + // The object should still be there + _, err = registry.Get(ctx, podName, &metav1.GetOptions{}) + if err != nil { + t.Fatal(err) + } +} diff --git a/staging/src/k8s.io/apiserver/pkg/storage/etcd3/store.go b/staging/src/k8s.io/apiserver/pkg/storage/etcd3/store.go index d8aa8b2fd37c0..17e2304422e09 100644 --- a/staging/src/k8s.io/apiserver/pkg/storage/etcd3/store.go +++ b/staging/src/k8s.io/apiserver/pkg/storage/etcd3/store.go @@ -300,19 +300,20 @@ func (s *store) GuaranteedUpdate( ret, ttl, err := s.updateState(origState, tryUpdate) if err != nil { - // It's possible we were working with stale data - if mustCheckData && apierrors.IsConflict(err) { - // Actually fetch - origState, err = getCurrentState() - if err != nil { - return err - } - mustCheckData = false - // Retry - continue + // If our data is already up to date, return the error + if !mustCheckData { + return err } - return err + // It's possible we were working with stale data + // Actually fetch + origState, err = getCurrentState() + if err != nil { + return err + } + mustCheckData = false + // Retry + continue } data, err := runtime.Encode(s.codec, ret) From 110387870d2dbe40b09df84c6b7196f53111e509 Mon Sep 17 00:00:00 2001 From: Mike Danese Date: Tue, 14 May 2019 09:29:16 -0700 Subject: [PATCH 92/96] BoundServiceAccountTokenVolume: fix InClusterConfig --- .../k8s.io/apiserver/pkg/util/webhook/authentication.go | 1 + staging/src/k8s.io/client-go/rest/transport.go | 7 ++++--- .../src/k8s.io/client-go/tools/clientcmd/client_config.go | 2 ++ 3 files changed, 7 insertions(+), 3 deletions(-) diff --git a/staging/src/k8s.io/apiserver/pkg/util/webhook/authentication.go b/staging/src/k8s.io/apiserver/pkg/util/webhook/authentication.go index 501cde418a5be..23350767baec4 100644 --- a/staging/src/k8s.io/apiserver/pkg/util/webhook/authentication.go +++ b/staging/src/k8s.io/apiserver/pkg/util/webhook/authentication.go @@ -138,6 
+138,7 @@ func restConfigFromKubeconfig(configAuthInfo *clientcmdapi.AuthInfo) (*rest.Conf // blindly overwrite existing values based on precedence if len(configAuthInfo.Token) > 0 { config.BearerToken = configAuthInfo.Token + config.BearerTokenFile = configAuthInfo.TokenFile } else if len(configAuthInfo.TokenFile) > 0 { tokenBytes, err := ioutil.ReadFile(configAuthInfo.TokenFile) if err != nil { diff --git a/staging/src/k8s.io/client-go/rest/transport.go b/staging/src/k8s.io/client-go/rest/transport.go index 25c1801b67bcf..f81ff091b3a4c 100644 --- a/staging/src/k8s.io/client-go/rest/transport.go +++ b/staging/src/k8s.io/client-go/rest/transport.go @@ -74,9 +74,10 @@ func (c *Config) TransportConfig() (*transport.Config, error) { KeyFile: c.KeyFile, KeyData: c.KeyData, }, - Username: c.Username, - Password: c.Password, - BearerToken: c.BearerToken, + Username: c.Username, + Password: c.Password, + BearerToken: c.BearerToken, + BearerTokenFile: c.BearerTokenFile, Impersonate: transport.ImpersonationConfig{ UserName: c.Impersonate.UserName, Groups: c.Impersonate.Groups, diff --git a/staging/src/k8s.io/client-go/tools/clientcmd/client_config.go b/staging/src/k8s.io/client-go/tools/clientcmd/client_config.go index 393868e7a1f00..f200eccd2e8da 100644 --- a/staging/src/k8s.io/client-go/tools/clientcmd/client_config.go +++ b/staging/src/k8s.io/client-go/tools/clientcmd/client_config.go @@ -228,6 +228,7 @@ func (config *DirectClientConfig) getUserIdentificationPartialConfig(configAuthI // blindly overwrite existing values based on precedence if len(configAuthInfo.Token) > 0 { mergedConfig.BearerToken = configAuthInfo.Token + mergedConfig.BearerTokenFile = configAuthInfo.TokenFile } else if len(configAuthInfo.TokenFile) > 0 { tokenBytes, err := ioutil.ReadFile(configAuthInfo.TokenFile) if err != nil { @@ -501,6 +502,7 @@ func (config *inClusterClientConfig) ClientConfig() (*restclient.Config, error) } if token := config.overrides.AuthInfo.Token; len(token) > 0 { 
icc.BearerToken = token + icc.BearerTokenFile = "" } if certificateAuthorityFile := config.overrides.ClusterInfo.CertificateAuthority; len(certificateAuthorityFile) > 0 { icc.TLSClientConfig.CAFile = certificateAuthorityFile From b83756b3181f464720bfb468a171a58fc110c3e8 Mon Sep 17 00:00:00 2001 From: Yucheng Wu Date: Tue, 14 May 2019 14:49:38 +0800 Subject: [PATCH 93/96] fix CVE-2019-11244: `kubectl --http-cache=` creates world-writeable cached schema files --- .../client-go/discovery/cached_discovery.go | 4 +- .../discovery/cached_discovery_test.go | 27 ++++++++++ .../client-go/discovery/round_tripper.go | 3 ++ .../client-go/discovery/round_tripper_test.go | 52 +++++++++++++++++++ 4 files changed, 84 insertions(+), 2 deletions(-) diff --git a/staging/src/k8s.io/client-go/discovery/cached_discovery.go b/staging/src/k8s.io/client-go/discovery/cached_discovery.go index d38a0bbdad31f..e98cad98897d7 100644 --- a/staging/src/k8s.io/client-go/discovery/cached_discovery.go +++ b/staging/src/k8s.io/client-go/discovery/cached_discovery.go @@ -162,7 +162,7 @@ func (d *CachedDiscoveryClient) getCachedFile(filename string) ([]byte, error) { } func (d *CachedDiscoveryClient) writeCachedFile(filename string, obj runtime.Object) error { - if err := os.MkdirAll(filepath.Dir(filename), 0755); err != nil { + if err := os.MkdirAll(filepath.Dir(filename), 0750); err != nil { return err } @@ -181,7 +181,7 @@ func (d *CachedDiscoveryClient) writeCachedFile(filename string, obj runtime.Obj return err } - err = os.Chmod(f.Name(), 0755) + err = os.Chmod(f.Name(), 0660) if err != nil { return err } diff --git a/staging/src/k8s.io/client-go/discovery/cached_discovery_test.go b/staging/src/k8s.io/client-go/discovery/cached_discovery_test.go index 278931c2d0904..637fefcc9dd7e 100644 --- a/staging/src/k8s.io/client-go/discovery/cached_discovery_test.go +++ b/staging/src/k8s.io/client-go/discovery/cached_discovery_test.go @@ -19,6 +19,7 @@ package discovery import ( "io/ioutil" "os" + 
"path/filepath" "testing" "time" @@ -95,6 +96,32 @@ func TestNewCachedDiscoveryClient_TTL(t *testing.T) { assert.Equal(c.groupCalls, 2) } +func TestNewCachedDiscoveryClient_PathPerm(t *testing.T) { + assert := assert.New(t) + + d, err := ioutil.TempDir("", "") + assert.NoError(err) + os.RemoveAll(d) + defer os.RemoveAll(d) + + c := fakeDiscoveryClient{} + cdc := newCachedDiscoveryClient(&c, d, 1*time.Nanosecond) + cdc.ServerGroups() + + err = filepath.Walk(d, func(path string, info os.FileInfo, err error) error { + if err != nil { + return err + } + if info.IsDir() { + assert.Equal(os.FileMode(0750), info.Mode().Perm()) + } else { + assert.Equal(os.FileMode(0660), info.Mode().Perm()) + } + return nil + }) + assert.NoError(err) +} + type fakeDiscoveryClient struct { groupCalls int resourceCalls int diff --git a/staging/src/k8s.io/client-go/discovery/round_tripper.go b/staging/src/k8s.io/client-go/discovery/round_tripper.go index 75b7f52097711..86081c18c7765 100644 --- a/staging/src/k8s.io/client-go/discovery/round_tripper.go +++ b/staging/src/k8s.io/client-go/discovery/round_tripper.go @@ -18,6 +18,7 @@ package discovery import ( "net/http" + "os" "path/filepath" "github.com/golang/glog" @@ -35,6 +36,8 @@ type cacheRoundTripper struct { // corresponding requests. 
func newCacheRoundTripper(cacheDir string, rt http.RoundTripper) http.RoundTripper { d := diskv.New(diskv.Options{ + PathPerm: os.FileMode(0750), + FilePerm: os.FileMode(0660), BasePath: cacheDir, TempDir: filepath.Join(cacheDir, ".diskv-temp"), }) diff --git a/staging/src/k8s.io/client-go/discovery/round_tripper_test.go b/staging/src/k8s.io/client-go/discovery/round_tripper_test.go index b15e2e771835f..09319a4c03a71 100644 --- a/staging/src/k8s.io/client-go/discovery/round_tripper_test.go +++ b/staging/src/k8s.io/client-go/discovery/round_tripper_test.go @@ -22,7 +22,10 @@ import ( "net/http" "net/url" "os" + "path/filepath" "testing" + + "github.com/stretchr/testify/assert" ) // copied from k8s.io/client-go/transport/round_trippers_test.go @@ -93,3 +96,52 @@ func TestCacheRoundTripper(t *testing.T) { t.Errorf("Invalid content read from cache %q", string(content)) } } + +func TestCacheRoundTripperPathPerm(t *testing.T) { + assert := assert.New(t) + + rt := &testRoundTripper{} + cacheDir, err := ioutil.TempDir("", "cache-rt") + os.RemoveAll(cacheDir) + defer os.RemoveAll(cacheDir) + + if err != nil { + t.Fatal(err) + } + cache := newCacheRoundTripper(cacheDir, rt) + + // First call, caches the response + req := &http.Request{ + Method: http.MethodGet, + URL: &url.URL{Host: "localhost"}, + } + rt.Response = &http.Response{ + Header: http.Header{"ETag": []string{`"123456"`}}, + Body: ioutil.NopCloser(bytes.NewReader([]byte("Content"))), + StatusCode: http.StatusOK, + } + resp, err := cache.RoundTrip(req) + if err != nil { + t.Fatal(err) + } + content, err := ioutil.ReadAll(resp.Body) + if err != nil { + t.Fatal(err) + } + if string(content) != "Content" { + t.Errorf(`Expected Body to be "Content", got %q`, string(content)) + } + + err = filepath.Walk(cacheDir, func(path string, info os.FileInfo, err error) error { + if err != nil { + return err + } + if info.IsDir() { + assert.Equal(os.FileMode(0750), info.Mode().Perm()) + } else { + assert.Equal(os.FileMode(0660), 
info.Mode().Perm()) + } + return nil + }) + assert.NoError(err) +} From f1c00f37919b425d4264e5b7728de7eea95a8871 Mon Sep 17 00:00:00 2001 From: Jordan Liggitt Date: Fri, 17 May 2019 10:02:08 -0400 Subject: [PATCH 94/96] Terminate watchers when watch cache is destroyed --- .../apiserver/pkg/storage/cacher/cacher.go | 1 + .../storage/cacher/cacher_whitebox_test.go | 36 +++++++++++++++++++ 2 files changed, 37 insertions(+) diff --git a/staging/src/k8s.io/apiserver/pkg/storage/cacher/cacher.go b/staging/src/k8s.io/apiserver/pkg/storage/cacher/cacher.go index fc4fd3c0c4baf..66028cd53fa14 100644 --- a/staging/src/k8s.io/apiserver/pkg/storage/cacher/cacher.go +++ b/staging/src/k8s.io/apiserver/pkg/storage/cacher/cacher.go @@ -234,6 +234,7 @@ func NewCacherFromConfig(config Config) *Cacher { cacher.stopWg.Add(1) go func() { defer cacher.stopWg.Done() + defer cacher.terminateAllWatchers() wait.Until( func() { if !cacher.isStopped() { diff --git a/staging/src/k8s.io/apiserver/pkg/storage/cacher/cacher_whitebox_test.go b/staging/src/k8s.io/apiserver/pkg/storage/cacher/cacher_whitebox_test.go index 0009ec7bfbcc5..09f70bb51bf8b 100644 --- a/staging/src/k8s.io/apiserver/pkg/storage/cacher/cacher_whitebox_test.go +++ b/staging/src/k8s.io/apiserver/pkg/storage/cacher/cacher_whitebox_test.go @@ -438,3 +438,39 @@ func TestWatcherNotGoingBackInTime(t *testing.T) { } } } + +func TestCacheWatcherStoppedOnDestroy(t *testing.T) { + backingStorage := &dummyStorage{} + cacher, _ := newTestCacher(backingStorage, 1000) + defer cacher.Stop() + + // Wait until cacher is initialized. 
+ cacher.ready.wait() + + w, err := cacher.Watch(context.Background(), "pods/ns", "0", storage.Everything) + if err != nil { + t.Fatalf("Failed to create watch: %v", err) + } + + watchClosed := make(chan struct{}) + go func() { + defer close(watchClosed) + for event := range w.ResultChan() { + switch event.Type { + case watch.Added, watch.Modified, watch.Deleted: + // ok + default: + t.Errorf("unexpected event %#v", event) + } + } + }() + + cacher.Stop() + + select { + case <-watchClosed: + case <-time.After(wait.ForeverTestTimeout): + t.Errorf("timed out waiting for watch to close") + } + +} From c3c2c9b35cdedbf6dfac198b8ffc8fc137dab85e Mon Sep 17 00:00:00 2001 From: Jordan Liggitt Date: Wed, 15 May 2019 08:15:02 -0400 Subject: [PATCH 95/96] honor overridden tokenfile, add InClusterConfig override tests --- .../tools/clientcmd/client_config.go | 6 +-- .../tools/clientcmd/client_config_test.go | 38 +++++++++++++++++-- 2 files changed, 37 insertions(+), 7 deletions(-) diff --git a/staging/src/k8s.io/client-go/tools/clientcmd/client_config.go b/staging/src/k8s.io/client-go/tools/clientcmd/client_config.go index f200eccd2e8da..e012e42ef3bf8 100644 --- a/staging/src/k8s.io/client-go/tools/clientcmd/client_config.go +++ b/staging/src/k8s.io/client-go/tools/clientcmd/client_config.go @@ -500,9 +500,9 @@ func (config *inClusterClientConfig) ClientConfig() (*restclient.Config, error) if server := config.overrides.ClusterInfo.Server; len(server) > 0 { icc.Host = server } - if token := config.overrides.AuthInfo.Token; len(token) > 0 { - icc.BearerToken = token - icc.BearerTokenFile = "" + if len(config.overrides.AuthInfo.Token) > 0 || len(config.overrides.AuthInfo.TokenFile) > 0 { + icc.BearerToken = config.overrides.AuthInfo.Token + icc.BearerTokenFile = config.overrides.AuthInfo.TokenFile } if certificateAuthorityFile := config.overrides.ClusterInfo.CertificateAuthority; len(certificateAuthorityFile) > 0 { icc.TLSClientConfig.CAFile = certificateAuthorityFile diff --git 
a/staging/src/k8s.io/client-go/tools/clientcmd/client_config_test.go b/staging/src/k8s.io/client-go/tools/clientcmd/client_config_test.go index 21a7d32271c56..22be6e38ae87d 100644 --- a/staging/src/k8s.io/client-go/tools/clientcmd/client_config_test.go +++ b/staging/src/k8s.io/client-go/tools/clientcmd/client_config_test.go @@ -547,6 +547,30 @@ func TestInClusterClientConfigPrecedence(t *testing.T) { }, }, }, + { + overrides: &ConfigOverrides{ + ClusterInfo: clientcmdapi.Cluster{ + Server: "https://host-from-overrides.com", + CertificateAuthority: "/path/to/ca-from-overrides.crt", + }, + AuthInfo: clientcmdapi.AuthInfo{ + Token: "token-from-override", + TokenFile: "tokenfile-from-override", + }, + }, + }, + { + overrides: &ConfigOverrides{ + ClusterInfo: clientcmdapi.Cluster{ + Server: "https://host-from-overrides.com", + CertificateAuthority: "/path/to/ca-from-overrides.crt", + }, + AuthInfo: clientcmdapi.AuthInfo{ + Token: "", + TokenFile: "tokenfile-from-override", + }, + }, + }, { overrides: &ConfigOverrides{}, }, @@ -555,13 +579,15 @@ func TestInClusterClientConfigPrecedence(t *testing.T) { for _, tc := range tt { expectedServer := "https://host-from-cluster.com" expectedToken := "token-from-cluster" + expectedTokenFile := "tokenfile-from-cluster" expectedCAFile := "/path/to/ca-from-cluster.crt" icc := &inClusterClientConfig{ inClusterConfigProvider: func() (*restclient.Config, error) { return &restclient.Config{ - Host: expectedServer, - BearerToken: expectedToken, + Host: expectedServer, + BearerToken: expectedToken, + BearerTokenFile: expectedTokenFile, TLSClientConfig: restclient.TLSClientConfig{ CAFile: expectedCAFile, }, @@ -578,8 +604,9 @@ func TestInClusterClientConfigPrecedence(t *testing.T) { if overridenServer := tc.overrides.ClusterInfo.Server; len(overridenServer) > 0 { expectedServer = overridenServer } - if overridenToken := tc.overrides.AuthInfo.Token; len(overridenToken) > 0 { - expectedToken = overridenToken + if 
len(tc.overrides.AuthInfo.Token) > 0 || len(tc.overrides.AuthInfo.TokenFile) > 0 { + expectedToken = tc.overrides.AuthInfo.Token + expectedTokenFile = tc.overrides.AuthInfo.TokenFile } if overridenCAFile := tc.overrides.ClusterInfo.CertificateAuthority; len(overridenCAFile) > 0 { expectedCAFile = overridenCAFile @@ -591,6 +618,9 @@ func TestInClusterClientConfigPrecedence(t *testing.T) { if clientConfig.BearerToken != expectedToken { t.Errorf("Expected token %v, got %v", expectedToken, clientConfig.BearerToken) } + if clientConfig.BearerTokenFile != expectedTokenFile { + t.Errorf("Expected tokenfile %v, got %v", expectedTokenFile, clientConfig.BearerTokenFile) + } if clientConfig.TLSClientConfig.CAFile != expectedCAFile { t.Errorf("Expected Certificate Authority %v, got %v", expectedCAFile, clientConfig.TLSClientConfig.CAFile) } From c1242957f9b1b539af842799c3d9df8c2d0768a9 Mon Sep 17 00:00:00 2001 From: andyzhangx Date: Thu, 9 May 2019 08:54:19 +0000 Subject: [PATCH 96/96] fix incorrect prometheus metrics --- .../providers/azure/azure_client.go | 98 +++++++------------ .../providers/azure/azure_metrics.go | 4 +- 2 files changed, 36 insertions(+), 66 deletions(-) diff --git a/pkg/cloudprovider/providers/azure/azure_client.go b/pkg/cloudprovider/providers/azure/azure_client.go index 16dbba4d57d9d..06664d3712e42 100644 --- a/pkg/cloudprovider/providers/azure/azure_client.go +++ b/pkg/cloudprovider/providers/azure/azure_client.go @@ -189,8 +189,7 @@ func (az *azVirtualMachinesClient) CreateOrUpdate(ctx context.Context, resourceG } err = future.WaitForCompletionRef(ctx, az.client.Client) - mc.Observe(err) - return future.Response(), err + return future.Response(), mc.Observe(err) } func (az *azVirtualMachinesClient) Get(ctx context.Context, resourceGroupName string, VMName string, expand compute.InstanceViewTypes) (result compute.VirtualMachine, err error) { @@ -276,13 +275,11 @@ func (az *azInterfacesClient) CreateOrUpdate(ctx context.Context, resourceGroupN mc := 
newMetricContext("interfaces", "create_or_update", resourceGroupName, az.client.SubscriptionID) future, err := az.client.CreateOrUpdate(ctx, resourceGroupName, networkInterfaceName, parameters) if err != nil { - mc.Observe(err) - return future.Response(), err + return future.Response(), mc.Observe(err) } err = future.WaitForCompletionRef(ctx, az.client.Client) - mc.Observe(err) - return future.Response(), err + return future.Response(), mc.Observe(err) } func (az *azInterfacesClient) Get(ctx context.Context, resourceGroupName string, networkInterfaceName string, expand string) (result network.Interface, err error) { @@ -354,14 +351,12 @@ func (az *azLoadBalancersClient) CreateOrUpdate(ctx context.Context, resourceGro mc := newMetricContext("load_balancers", "create_or_update", resourceGroupName, az.client.SubscriptionID) future, err := az.client.CreateOrUpdate(ctx, resourceGroupName, loadBalancerName, parameters) - mc.Observe(err) if err != nil { - return future.Response(), err + return future.Response(), mc.Observe(err) } err = future.WaitForCompletionRef(ctx, az.client.Client) - mc.Observe(err) - return future.Response(), err + return future.Response(), mc.Observe(err) } func (az *azLoadBalancersClient) Delete(ctx context.Context, resourceGroupName string, loadBalancerName string) (resp *http.Response, err error) { @@ -378,14 +373,12 @@ func (az *azLoadBalancersClient) Delete(ctx context.Context, resourceGroupName s mc := newMetricContext("load_balancers", "delete", resourceGroupName, az.client.SubscriptionID) future, err := az.client.Delete(ctx, resourceGroupName, loadBalancerName) - mc.Observe(err) if err != nil { - return future.Response(), err + return future.Response(), mc.Observe(err) } err = future.WaitForCompletionRef(ctx, az.client.Client) - mc.Observe(err) - return future.Response(), err + return future.Response(), mc.Observe(err) } func (az *azLoadBalancersClient) Get(ctx context.Context, resourceGroupName string, loadBalancerName string, expand 
string) (result network.LoadBalancer, err error) { @@ -470,14 +463,12 @@ func (az *azPublicIPAddressesClient) CreateOrUpdate(ctx context.Context, resourc mc := newMetricContext("public_ip_addresses", "create_or_update", resourceGroupName, az.client.SubscriptionID) future, err := az.client.CreateOrUpdate(ctx, resourceGroupName, publicIPAddressName, parameters) - mc.Observe(err) if err != nil { - return future.Response(), err + return future.Response(), mc.Observe(err) } err = future.WaitForCompletionRef(ctx, az.client.Client) - mc.Observe(err) - return future.Response(), err + return future.Response(), mc.Observe(err) } func (az *azPublicIPAddressesClient) Delete(ctx context.Context, resourceGroupName string, publicIPAddressName string) (resp *http.Response, err error) { @@ -494,14 +485,12 @@ func (az *azPublicIPAddressesClient) Delete(ctx context.Context, resourceGroupNa mc := newMetricContext("public_ip_addresses", "delete", resourceGroupName, az.client.SubscriptionID) future, err := az.client.Delete(ctx, resourceGroupName, publicIPAddressName) - mc.Observe(err) if err != nil { - return future.Response(), err + return future.Response(), mc.Observe(err) } err = future.WaitForCompletionRef(ctx, az.client.Client) - mc.Observe(err) - return future.Response(), err + return future.Response(), mc.Observe(err) } func (az *azPublicIPAddressesClient) Get(ctx context.Context, resourceGroupName string, publicIPAddressName string, expand string) (result network.PublicIPAddress, err error) { @@ -586,13 +575,11 @@ func (az *azSubnetsClient) CreateOrUpdate(ctx context.Context, resourceGroupName mc := newMetricContext("subnets", "create_or_update", resourceGroupName, az.client.SubscriptionID) future, err := az.client.CreateOrUpdate(ctx, resourceGroupName, virtualNetworkName, subnetName, subnetParameters) if err != nil { - mc.Observe(err) - return future.Response(), err + return future.Response(), mc.Observe(err) } err = future.WaitForCompletionRef(ctx, az.client.Client) - 
mc.Observe(err) - return future.Response(), err + return future.Response(), mc.Observe(err) } func (az *azSubnetsClient) Delete(ctx context.Context, resourceGroupName string, virtualNetworkName string, subnetName string) (resp *http.Response, err error) { @@ -610,13 +597,11 @@ func (az *azSubnetsClient) Delete(ctx context.Context, resourceGroupName string, mc := newMetricContext("subnets", "delete", resourceGroupName, az.client.SubscriptionID) future, err := az.client.Delete(ctx, resourceGroupName, virtualNetworkName, subnetName) if err != nil { - mc.Observe(err) - return future.Response(), err + return future.Response(), mc.Observe(err) } err = future.WaitForCompletionRef(ctx, az.client.Client) - mc.Observe(err) - return future.Response(), err + return future.Response(), mc.Observe(err) } func (az *azSubnetsClient) Get(ctx context.Context, resourceGroupName string, virtualNetworkName string, subnetName string, expand string) (result network.Subnet, err error) { @@ -648,8 +633,8 @@ func (az *azSubnetsClient) List(ctx context.Context, resourceGroupName string, v mc := newMetricContext("subnets", "list", resourceGroupName, az.client.SubscriptionID) iterator, err := az.client.ListComplete(ctx, resourceGroupName, virtualNetworkName) + mc.Observe(err) if err != nil { - mc.Observe(err) return nil, err } @@ -701,13 +686,11 @@ func (az *azSecurityGroupsClient) CreateOrUpdate(ctx context.Context, resourceGr mc := newMetricContext("security_groups", "create_or_update", resourceGroupName, az.client.SubscriptionID) future, err := az.client.CreateOrUpdate(ctx, resourceGroupName, networkSecurityGroupName, parameters) if err != nil { - mc.Observe(err) - return future.Response(), err + return future.Response(), mc.Observe(err) } err = future.WaitForCompletionRef(ctx, az.client.Client) - mc.Observe(err) - return future.Response(), err + return future.Response(), mc.Observe(err) } func (az *azSecurityGroupsClient) Delete(ctx context.Context, resourceGroupName string, 
networkSecurityGroupName string) (resp *http.Response, err error) { @@ -725,13 +708,11 @@ func (az *azSecurityGroupsClient) Delete(ctx context.Context, resourceGroupName mc := newMetricContext("security_groups", "delete", resourceGroupName, az.client.SubscriptionID) future, err := az.client.Delete(ctx, resourceGroupName, networkSecurityGroupName) if err != nil { - mc.Observe(err) - return future.Response(), err + return future.Response(), mc.Observe(err) } err = future.WaitForCompletionRef(ctx, az.client.Client) - mc.Observe(err) - return future.Response(), err + return future.Response(), mc.Observe(err) } func (az *azSecurityGroupsClient) Get(ctx context.Context, resourceGroupName string, networkSecurityGroupName string, expand string) (result network.SecurityGroup, err error) { @@ -946,14 +927,12 @@ func (az *azVirtualMachineScaleSetVMsClient) Update(ctx context.Context, resourc mc := newMetricContext("vmssvm", "update", resourceGroupName, az.client.SubscriptionID) future, err := az.client.Update(ctx, resourceGroupName, VMScaleSetName, instanceID, parameters) - mc.Observe(err) if err != nil { - return future.Response(), err + return future.Response(), mc.Observe(err) } err = future.WaitForCompletionRef(ctx, az.client.Client) - mc.Observe(err) - return future.Response(), err + return future.Response(), mc.Observe(err) } // azRoutesClient implements RoutesClient. 
@@ -992,13 +971,11 @@ func (az *azRoutesClient) CreateOrUpdate(ctx context.Context, resourceGroupName mc := newMetricContext("routes", "create_or_update", resourceGroupName, az.client.SubscriptionID) future, err := az.client.CreateOrUpdate(ctx, resourceGroupName, routeTableName, routeName, routeParameters) if err != nil { - mc.Observe(err) - return future.Response(), err + return future.Response(), mc.Observe(err) } err = future.WaitForCompletionRef(ctx, az.client.Client) - mc.Observe(err) - return future.Response(), err + return future.Response(), mc.Observe(err) } func (az *azRoutesClient) Delete(ctx context.Context, resourceGroupName string, routeTableName string, routeName string) (resp *http.Response, err error) { @@ -1016,13 +993,11 @@ func (az *azRoutesClient) Delete(ctx context.Context, resourceGroupName string, mc := newMetricContext("routes", "delete", resourceGroupName, az.client.SubscriptionID) future, err := az.client.Delete(ctx, resourceGroupName, routeTableName, routeName) if err != nil { - mc.Observe(err) - return future.Response(), err + return future.Response(), mc.Observe(err) } err = future.WaitForCompletionRef(ctx, az.client.Client) - mc.Observe(err) - return future.Response(), err + return future.Response(), mc.Observe(err) } // azRouteTablesClient implements RouteTablesClient. 
@@ -1060,14 +1035,12 @@ func (az *azRouteTablesClient) CreateOrUpdate(ctx context.Context, resourceGroup mc := newMetricContext("route_tables", "create_or_update", resourceGroupName, az.client.SubscriptionID) future, err := az.client.CreateOrUpdate(ctx, resourceGroupName, routeTableName, parameters) - mc.Observe(err) if err != nil { - return future.Response(), err + return future.Response(), mc.Observe(err) } err = future.WaitForCompletionRef(ctx, az.client.Client) - mc.Observe(err) - return future.Response(), err + return future.Response(), mc.Observe(err) } func (az *azRouteTablesClient) Get(ctx context.Context, resourceGroupName string, routeTableName string, expand string) (result network.RouteTable, err error) { @@ -1126,8 +1099,7 @@ func (az *azStorageAccountClient) Create(ctx context.Context, resourceGroupName } err = future.WaitForCompletionRef(ctx, az.client.Client) - mc.Observe(err) - return future.Response(), err + return future.Response(), mc.Observe(err) } func (az *azStorageAccountClient) Delete(ctx context.Context, resourceGroupName string, accountName string) (result autorest.Response, err error) { @@ -1232,14 +1204,12 @@ func (az *azDisksClient) CreateOrUpdate(ctx context.Context, resourceGroupName s mc := newMetricContext("disks", "create_or_update", resourceGroupName, az.client.SubscriptionID) future, err := az.client.CreateOrUpdate(ctx, resourceGroupName, diskName, diskParameter) - mc.Observe(err) if err != nil { - return future.Response(), err + return future.Response(), mc.Observe(err) } err = future.WaitForCompletionRef(ctx, az.client.Client) - mc.Observe(err) - return future.Response(), err + return future.Response(), mc.Observe(err) } func (az *azDisksClient) Delete(ctx context.Context, resourceGroupName string, diskName string) (resp *http.Response, err error) { @@ -1256,14 +1226,12 @@ func (az *azDisksClient) Delete(ctx context.Context, resourceGroupName string, d mc := newMetricContext("disks", "delete", resourceGroupName, 
az.client.SubscriptionID) future, err := az.client.Delete(ctx, resourceGroupName, diskName) - mc.Observe(err) if err != nil { - return future.Response(), err + return future.Response(), mc.Observe(err) } err = future.WaitForCompletionRef(ctx, az.client.Client) - mc.Observe(err) - return future.Response(), err + return future.Response(), mc.Observe(err) } func (az *azDisksClient) Get(ctx context.Context, resourceGroupName string, diskName string) (result compute.Disk, err error) { diff --git a/pkg/cloudprovider/providers/azure/azure_metrics.go b/pkg/cloudprovider/providers/azure/azure_metrics.go index cae35c594b66a..0e53f0b5f4240 100644 --- a/pkg/cloudprovider/providers/azure/azure_metrics.go +++ b/pkg/cloudprovider/providers/azure/azure_metrics.go @@ -49,12 +49,14 @@ func newMetricContext(prefix, request, resourceGroup, subscriptionID string) *me } } -func (mc *metricContext) Observe(err error) { +func (mc *metricContext) Observe(err error) error { apiMetrics.latency.WithLabelValues(mc.attributes...).Observe( time.Since(mc.start).Seconds()) if err != nil { apiMetrics.errors.WithLabelValues(mc.attributes...).Inc() } + + return err } func registerAPIMetrics(attributes ...string) *apiCallMetrics {