diff --git a/pkg/cloudprovider/providers/vsphere/nodemanager.go b/pkg/cloudprovider/providers/vsphere/nodemanager.go index 92f4c55d5e0ac..83b7a6600cee0 100644 --- a/pkg/cloudprovider/providers/vsphere/nodemanager.go +++ b/pkg/cloudprovider/providers/vsphere/nodemanager.go @@ -22,10 +22,13 @@ import ( "strings" "sync" + "github.com/vmware/govmomi/object" "k8s.io/api/core/v1" k8stypes "k8s.io/apimachinery/pkg/types" + cloudprovider "k8s.io/cloud-provider" "k8s.io/klog" "k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib" + kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis" ) // Stores info about the kubernetes node @@ -47,17 +50,25 @@ type NodeManager struct { registeredNodes map[string]*v1.Node //CredentialsManager credentialManager *SecretCredentialManager + // Maps node name to zone information + zoneInfoMap map[string]*cloudprovider.Zone + // zone tag category from vsphere.conf + zoneCategoryName string + // region tag category from vsphere.conf + regionCategoryName string // Mutexes registeredNodesLock sync.RWMutex nodeInfoLock sync.RWMutex credentialManagerLock sync.Mutex + zoneInfoLock sync.RWMutex } type NodeDetails struct { NodeName string vm *vclib.VirtualMachine VMUUID string + Zone cloudprovider.Zone } // TODO: Make it configurable in vsphere.conf @@ -190,6 +201,12 @@ func (nm *NodeManager) DiscoverNode(node *v1.Node) error { klog.V(4).Infof("Found node %s as vm=%+v in vc=%s and datacenter=%s", node.Name, vm, res.vc, res.datacenter.Name()) + // keep the zones discovered for the node + nodeZone := node.ObjectMeta.Labels[kubeletapis.LabelZoneFailureDomain] + nodeRegion := node.ObjectMeta.Labels[kubeletapis.LabelZoneRegion] + zone := cloudprovider.Zone{FailureDomain: nodeZone, Region: nodeRegion} + klog.V(4).Infof("Storing zone %v for node %s", zone, node.Name) + nm.addZoneInfo(ctx, node.ObjectMeta.Name, &zone) nodeInfo := &NodeInfo{dataCenter: res.datacenter, vm: vm, vcServer: res.vc, vmUUID: nodeUUID} nm.addNodeInfo(node.ObjectMeta.Name, nodeInfo) for range queueChannel { @@ -214,6 +231,20 @@ func (nm *NodeManager) DiscoverNode(node *v1.Node) error { return vclib.ErrNoVMFound } +func (nm *NodeManager) addZoneInfo(ctx context.Context, nodeName string, zone *cloudprovider.Zone) { + klog.V(4).Infof("add zone info to node %s: %v", nodeName, *zone) + nm.zoneInfoLock.Lock() + nm.zoneInfoMap[nodeName] = zone + nm.zoneInfoLock.Unlock() +} + +func (nm *NodeManager) getZoneInfo(ctx context.Context, nodeName string) (*cloudprovider.Zone, error) { + nm.zoneInfoLock.RLock() + zone := nm.zoneInfoMap[nodeName] + nm.zoneInfoLock.RUnlock() + return zone, nil +} + func (nm *NodeManager) RegisterNode(node *v1.Node) error { nm.addNode(node) nm.DiscoverNode(node) @@ -258,6 +289,10 @@ func (nm *NodeManager) removeNode(node *v1.Node) { nm.nodeInfoLock.Lock() delete(nm.nodeInfoMap, node.ObjectMeta.Name) nm.nodeInfoLock.Unlock() + + nm.zoneInfoLock.Lock() + delete(nm.zoneInfoMap, node.ObjectMeta.Name) + nm.zoneInfoLock.Unlock() } // GetNodeInfo returns a NodeInfo which datacenter, vm and vc server ip address. 
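The hunk above adds a node-name to zone cache (`zoneInfoMap`) to `NodeManager`, guarded by its own `zoneInfoLock`, populated during `DiscoverNode` from the node's zone/region labels and cleaned up in `removeNode`. Below is a minimal, self-contained sketch of that read/write-locked cache pattern; the types are simplified stand-ins, not the PR's own `cloudprovider.Zone` or `NodeManager`.

```go
package main

import (
	"fmt"
	"sync"
)

// Zone mirrors the two fields the cloud provider tracks per node.
type Zone struct {
	FailureDomain string
	Region        string
}

// zoneCache is a node-name -> zone map with writes under Lock and reads under RLock.
type zoneCache struct {
	mu    sync.RWMutex
	zones map[string]*Zone
}

func newZoneCache() *zoneCache {
	return &zoneCache{zones: make(map[string]*Zone)}
}

func (c *zoneCache) add(node string, z *Zone) {
	c.mu.Lock()
	defer c.mu.Unlock()
	c.zones[node] = z
}

func (c *zoneCache) get(node string) (*Zone, bool) {
	c.mu.RLock()
	defer c.mu.RUnlock()
	z, ok := c.zones[node]
	return z, ok
}

func (c *zoneCache) remove(node string) {
	c.mu.Lock()
	defer c.mu.Unlock()
	delete(c.zones, node)
}

func main() {
	c := newZoneCache()
	c.add("node-1", &Zone{FailureDomain: "zone-a", Region: "region-1"})
	if z, ok := c.get("node-1"); ok {
		fmt.Printf("node-1 -> %+v\n", *z)
	}
	c.remove("node-1")
}
```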
@@ -310,7 +345,11 @@ func (nm *NodeManager) GetNodeDetails() ([]NodeDetails, error) { return nil, err } klog.V(4).Infof("Updated NodeInfo %v for node %q.", nodeInfo, nodeName) - nodeDetails = append(nodeDetails, NodeDetails{nodeName, nodeInfo.vm, nodeInfo.vmUUID}) + zone, err := nm.getZoneInfo(context.TODO(), nodeName) + if err != nil { + return nil, err + } + nodeDetails = append(nodeDetails, NodeDetails{nodeName, nodeInfo.vm, nodeInfo.vmUUID, *zone}) } return nodeDetails, nil } @@ -443,3 +482,27 @@ func (nm *NodeManager) UpdateCredentialManager(credentialManager *SecretCredenti defer nm.credentialManagerLock.Unlock() nm.credentialManager = credentialManager } + +func (nm *NodeManager) GetHostsInZone(ctx context.Context, zone cloudprovider.Zone) ([]*object.HostSystem, error) { + klog.V(7).Infof("GetHostsInZone called with registeredNodes: %v", nm.registeredNodes) + nodeDetails, err := nm.GetNodeDetails() + if err != nil { + return nil, err + } + klog.V(4).Infof("Node Details: %v", nodeDetails) + // Return those hosts that are in given zone. + hosts := make([]*object.HostSystem, 0) + for _, n := range nodeDetails { + // Match the provided zone with the node's zone, if not empty. + if (zone.FailureDomain == "" || zone.FailureDomain == n.Zone.FailureDomain) && (zone.Region == "" || zone.Region == n.Zone.Region) { + host, err := n.vm.HostSystem(ctx) + if err != nil { + klog.Errorf("Failed to get host system for VM %s. err: %+v", n.vm, err) + continue + } + hosts = append(hosts, host) + } + } + klog.V(4).Infof("GetHostsInZone %v returning: %v", zone, hosts) + return hosts, nil +} diff --git a/pkg/cloudprovider/providers/vsphere/vclib/constants.go b/pkg/cloudprovider/providers/vsphere/vclib/constants.go index 522f308b8b34a..36b3ad6a742e8 100644 --- a/pkg/cloudprovider/providers/vsphere/vclib/constants.go +++ b/pkg/cloudprovider/providers/vsphere/vclib/constants.go @@ -40,15 +40,18 @@ const ( // Other Constants const ( - LogLevel = 4 - DatastoreProperty = "datastore" - ResourcePoolProperty = "resourcePool" - DatastoreInfoProperty = "info" - VirtualMachineType = "VirtualMachine" - RoundTripperDefaultCount = 3 - VSANDatastoreType = "vsan" - DummyVMPrefixName = "vsphere-k8s" - ActivePowerState = "poweredOn" + LogLevel = 4 + DatastoreProperty = "datastore" + ResourcePoolProperty = "resourcePool" + DatastoreInfoProperty = "info" + VirtualMachineType = "VirtualMachine" + RoundTripperDefaultCount = 3 + VSANDatastoreType = "vsan" + DummyVMPrefixName = "vsphere-k8s" + ActivePowerState = "poweredOn" + DatacenterType = "Datacenter" + ClusterComputeResourceType = "ClusterComputeResource" + HostSystemType = "HostSystem" ) // Test Constants diff --git a/pkg/cloudprovider/providers/vsphere/vclib/datacenter.go b/pkg/cloudprovider/providers/vsphere/vclib/datacenter.go index 778e0f6829005..1b4ecc4819223 100644 --- a/pkg/cloudprovider/providers/vsphere/vclib/datacenter.go +++ b/pkg/cloudprovider/providers/vsphere/vclib/datacenter.go @@ -145,6 +145,20 @@ func (dc *Datacenter) GetAllDatastores(ctx context.Context) (map[string]*Datasto return dsURLInfoMap, nil } +func (dc *Datacenter) GetAllHosts(ctx context.Context) ([]types.ManagedObjectReference, error) { + finder := getFinder(dc) + hostSystems, err := finder.HostSystemList(ctx, "*") + if err != nil { + klog.Errorf("Failed to get all hostSystems. 
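`GetHostsInZone` above treats an empty `FailureDomain` or `Region` in the requested zone as a wildcard when matching against each node's stored zone. A small sketch of just that matching rule, with simplified stand-in types:

```go
package main

import "fmt"

type Zone struct{ FailureDomain, Region string }

// zoneMatches reports whether a node's zone satisfies the query; empty query fields match anything.
func zoneMatches(query, nodeZone Zone) bool {
	return (query.FailureDomain == "" || query.FailureDomain == nodeZone.FailureDomain) &&
		(query.Region == "" || query.Region == nodeZone.Region)
}

func main() {
	nodes := map[string]Zone{
		"node-1": {FailureDomain: "zone-a", Region: "region-1"},
		"node-2": {FailureDomain: "zone-b", Region: "region-1"},
	}
	query := Zone{FailureDomain: "zone-a"} // Region left empty -> wildcard
	for name, z := range nodes {
		if zoneMatches(query, z) {
			fmt.Println("host of", name, "is in the requested zone")
		}
	}
}
```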
err: %+v", err) + return nil, err + } + var hostMors []types.ManagedObjectReference + for _, hs := range hostSystems { + hostMors = append(hostMors, hs.Reference()) + } + return hostMors, nil +} + // GetDatastoreByPath gets the Datastore object from the given vmDiskPath func (dc *Datacenter) GetDatastoreByPath(ctx context.Context, vmDiskPath string) (*Datastore, error) { datastorePathObj := new(object.DatastorePath) diff --git a/pkg/cloudprovider/providers/vsphere/vclib/datastore.go b/pkg/cloudprovider/providers/vsphere/vclib/datastore.go index a57685bc76cb1..e77d998d01d53 100644 --- a/pkg/cloudprovider/providers/vsphere/vclib/datastore.go +++ b/pkg/cloudprovider/providers/vsphere/vclib/datastore.go @@ -85,3 +85,19 @@ func (ds *Datastore) IsCompatibleWithStoragePolicy(ctx context.Context, storageP } return pbmClient.IsDatastoreCompatible(ctx, storagePolicyID, ds) } + +// Get the host names mounted on given datastore +func (ds *Datastore) GetDatastoreHostMounts(ctx context.Context) ([]types.ManagedObjectReference, error) { + var dsMo mo.Datastore + pc := property.DefaultCollector(ds.Client()) + err := pc.RetrieveOne(ctx, ds.Datastore.Reference(), []string{"host"}, &dsMo) + if err != nil { + klog.Error("Failed to retrieve datastore host mount property. err: %v", err) + return nil, err + } + hosts := make([]types.ManagedObjectReference, 0) + for _, dsHostMount := range dsMo.Host { + hosts = append(hosts, dsHostMount.Key) + } + return hosts, nil +} diff --git a/pkg/cloudprovider/providers/vsphere/vclib/volumeoptions.go b/pkg/cloudprovider/providers/vsphere/vclib/volumeoptions.go index 989ed4468158c..02f9e9ce944ab 100644 --- a/pkg/cloudprovider/providers/vsphere/vclib/volumeoptions.go +++ b/pkg/cloudprovider/providers/vsphere/vclib/volumeoptions.go @@ -33,6 +33,7 @@ type VolumeOptions struct { StoragePolicyName string StoragePolicyID string SCSIControllerType string + Zone []string } var ( diff --git a/pkg/cloudprovider/providers/vsphere/vsphere.go b/pkg/cloudprovider/providers/vsphere/vsphere.go index 2e562b036ed2a..f7976dae728e9 100644 --- a/pkg/cloudprovider/providers/vsphere/vsphere.go +++ b/pkg/cloudprovider/providers/vsphere/vsphere.go @@ -33,11 +33,14 @@ import ( "gopkg.in/gcfg.v1" + "github.com/vmware/govmomi/object" "github.com/vmware/govmomi/vapi/rest" "github.com/vmware/govmomi/vapi/tags" "github.com/vmware/govmomi/vim25/mo" + vmwaretypes "github.com/vmware/govmomi/vim25/types" "k8s.io/api/core/v1" k8stypes "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/sets" "k8s.io/client-go/informers" "k8s.io/client-go/tools/cache" cloudprovider "k8s.io/cloud-provider" @@ -45,6 +48,8 @@ import ( v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper" "k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib" "k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib/diskmanagers" + kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis" + volumeutil "k8s.io/kubernetes/pkg/volume/util" ) // VSphere Cloud Provider constants @@ -56,6 +61,8 @@ const ( MacOuiVC = "00:50:56" MacOuiEsx = "00:0c:29" CleanUpDummyVMRoutineInterval = 5 + DefaultZoneCategoryName = "zone" + DefaultRegionCategoryName = "region" ) var cleanUpRoutineInitialized = false @@ -68,12 +75,14 @@ var cleanUpDummyVMLock sync.RWMutex const ( MissingUsernameErrMsg = "Username is missing" MissingPasswordErrMsg = "Password is missing" + NoZoneTagInVCErrMsg = "No zone tags found in vCenter" ) // Error constants var ( ErrUsernameMissing = errors.New(MissingUsernameErrMsg) 
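`GetDatastoreHostMounts` above fetches only the datastore's `host` property through the govmomi property collector and returns the host references. A sketch of that retrieval pattern, assuming govmomi and an already-connected `*vim25.Client`; the function and variable names here are illustrative, not the provider's own:

```go
package example

import (
	"context"

	"github.com/vmware/govmomi/property"
	"github.com/vmware/govmomi/vim25"
	"github.com/vmware/govmomi/vim25/mo"
	"github.com/vmware/govmomi/vim25/types"
)

// datastoreHosts returns the references of all hosts that mount the given datastore.
func datastoreHosts(ctx context.Context, c *vim25.Client, dsRef types.ManagedObjectReference) ([]types.ManagedObjectReference, error) {
	var dsMo mo.Datastore
	pc := property.DefaultCollector(c)
	// Retrieve only the "host" property, keeping the round trip cheap.
	if err := pc.RetrieveOne(ctx, dsRef, []string{"host"}, &dsMo); err != nil {
		return nil, err
	}
	hosts := make([]types.ManagedObjectReference, 0, len(dsMo.Host))
	for _, mount := range dsMo.Host {
		hosts = append(hosts, mount.Key)
	}
	return hosts, nil
}
```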
ErrPasswordMissing = errors.New(MissingPasswordErrMsg) + NoZoneTagInVC = errors.New(NoZoneTagInVCErrMsg) ) var _ cloudprovider.Interface = (*VSphere)(nil) @@ -503,6 +512,14 @@ func buildVSphereFromConfig(cfg VSphereConfig) (*VSphere, error) { if cfg.Global.VCenterPort == "" { cfg.Global.VCenterPort = "443" } + if cfg.Labels.Zone == "" { + cfg.Labels.Zone = DefaultZoneCategoryName + } + if cfg.Labels.Region == "" { + cfg.Labels.Region = DefaultRegionCategoryName + } + klog.V(4).Infof("vSphere cloud provider will look for tag category of %s for zone and %s for region", cfg.Labels.Zone, cfg.Labels.Region) + vsphereInstanceMap, err := populateVsphereInstanceMap(&cfg) if err != nil { return nil, err @@ -514,6 +531,9 @@ func buildVSphereFromConfig(cfg VSphereConfig) (*VSphere, error) { vsphereInstanceMap: vsphereInstanceMap, nodeInfoMap: make(map[string]*NodeInfo), registeredNodes: make(map[string]*v1.Node), + zoneInfoMap: make(map[string]*cloudprovider.Zone), + zoneCategoryName: cfg.Labels.Zone, + regionCategoryName: cfg.Labels.Region, }, isSecretInfoProvided: isSecretInfoProvided, cfg: &cfg, @@ -1137,6 +1157,7 @@ func (vs *VSphere) CreateVolume(volumeOptions *vclib.VolumeOptions) (canonicalVo klog.V(1).Infof("Starting to create a vSphere volume with volumeOptions: %+v", volumeOptions) createVolumeInternal := func(volumeOptions *vclib.VolumeOptions) (canonicalVolumePath string, err error) { var datastore string + var dsList []*vclib.DatastoreInfo // If datastore not specified, then use default datastore if volumeOptions.Datastore == "" { datastore = vs.cfg.Workspace.DefaultDatastore @@ -1157,6 +1178,28 @@ func (vs *VSphere) CreateVolume(volumeOptions *vclib.VolumeOptions) (canonicalVo } var vmOptions *vclib.VMOptions if volumeOptions.VSANStorageProfileData != "" || volumeOptions.StoragePolicyName != "" { + // If datastore and zone are specified, first validate if the datastore is in the provided zone. + if len(volumeOptions.Zone) != 0 && volumeOptions.Datastore != "" { + klog.V(4).Infof("Specified zone : %s, datastore : %s", volumeOptions.Zone, volumeOptions.Datastore) + dsList, err = getDatastoresForZone(ctx, dc, vs.nodeManager, volumeOptions.Zone) + if err != nil { + return "", err + } + + // Validate if the datastore provided belongs to the zone. If not, fail the operation. + found := false + for _, ds := range dsList { + if ds.Info.Name == volumeOptions.Datastore { + found = true + break + } + } + if !found { + err := fmt.Errorf("The specified datastore %s does not match the provided zones : %s", volumeOptions.Datastore, volumeOptions.Zone) + klog.Error(err) + return "", err + } + } // Acquire a read lock to ensure multiple PVC requests can be processed simultaneously. cleanUpDummyVMLock.RLock() defer cleanUpDummyVMLock.RUnlock() @@ -1176,29 +1219,89 @@ func (vs *VSphere) CreateVolume(volumeOptions *vclib.VolumeOptions) (canonicalVo } } if volumeOptions.StoragePolicyName != "" && volumeOptions.Datastore == "" { - datastore, err = getPbmCompatibleDatastore(ctx, dc, volumeOptions.StoragePolicyName, vs.nodeManager) + if len(volumeOptions.Zone) == 0 { + klog.V(4).Infof("Selecting a shared datastore as per the storage policy %s", volumeOptions.StoragePolicyName) + datastore, err = getPbmCompatibleDatastore(ctx, dc, volumeOptions.StoragePolicyName, vs.nodeManager) + } else { + // If zone is specified, first get the datastores in the zone. + dsList, err = getDatastoresForZone(ctx, dc, vs.nodeManager, volumeOptions.Zone) + + // If unable to get any datastore, fail the operation. 
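When both a zone and a datastore are specified, `CreateVolume` above first fetches the zone's shared datastores and fails the request if the named datastore is not among them. A trivial sketch of that membership check, with `DatastoreInfo` reduced to just a name:

```go
package main

import "fmt"

type DatastoreInfo struct{ Name string }

// datastoreInZone returns nil only when the requested datastore is in the zoned list.
func datastoreInZone(requested string, zoned []DatastoreInfo) error {
	for _, ds := range zoned {
		if ds.Name == requested {
			return nil
		}
	}
	return fmt.Errorf("datastore %q does not belong to the requested zones", requested)
}

func main() {
	zoned := []DatastoreInfo{{Name: "vsanDatastore"}, {Name: "sharedVmfs-0"}}
	fmt.Println(datastoreInZone("vsanDatastore", zoned)) // <nil>
	fmt.Println(datastoreInZone("localDs-1", zoned))     // error
}
```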
+ if len(dsList) == 0 { + err := fmt.Errorf("Failed to find a shared datastore matching zone %s", volumeOptions.Zone) + klog.Error(err) + return "", err + } + + klog.V(4).Infof("Specified zone : %s. Picking a datastore as per the storage policy %s among the zoned datastores : %s", volumeOptions.Zone, + volumeOptions.StoragePolicyName, dsList) + // Among the compatible datastores, select the one based on the maximum free space. + datastore, err = getPbmCompatibleZonedDatastore(ctx, dc, volumeOptions.StoragePolicyName, dsList) + } + klog.V(1).Infof("Datastore selected as per policy : %s", datastore) if err != nil { klog.Errorf("Failed to get pbm compatible datastore with storagePolicy: %s. err: %+v", volumeOptions.StoragePolicyName, err) return "", err } } else { - // Since no storage policy is specified but datastore is specified, check - // if the given datastore is a shared datastore across all node VMs. - sharedDsList, err := getSharedDatastoresInK8SCluster(ctx, dc, vs.nodeManager) - if err != nil { - klog.Errorf("Failed to get shared datastore: %+v", err) - return "", err - } - found := false - for _, sharedDs := range sharedDsList { - if datastore == sharedDs.Info.Name { - found = true - break + // If zone is specified, pick the datastore in the zone with maximum free space within the zone. + if volumeOptions.Datastore == "" && len(volumeOptions.Zone) != 0 { + klog.V(4).Infof("Specified zone : %s", volumeOptions.Zone) + dsList, err = getDatastoresForZone(ctx, dc, vs.nodeManager, volumeOptions.Zone) + + // If unable to get any datastore, fail the operation + if len(dsList) == 0 { + err := fmt.Errorf("Failed to find a shared datastore matching zone %s", volumeOptions.Zone) + klog.Error(err) + return "", err + } + + if err != nil { + return "", err + } + datastore, err = getMostFreeDatastoreName(ctx, nil, dsList) + if err != nil { + klog.Errorf("Failed to get shared datastore: %+v", err) + return "", err + } + klog.V(1).Infof("Specified zone : %s. Selected datastore : %s", volumeOptions.StoragePolicyName, datastore) + } else { + var sharedDsList []*vclib.DatastoreInfo + var err error + if len(volumeOptions.Zone) == 0 { + // If zone is not provided, get the shared datastore across all node VMs. + klog.V(4).Infof("Validating if datastore %s is shared across all node VMs", datastore) + var err error + sharedDsList, err = getSharedDatastoresInK8SCluster(ctx, dc, vs.nodeManager) + if err != nil { + klog.Errorf("Failed to get shared datastore: %+v", err) + return "", err + } + // Prepare error msg to be used later, if required. + err = fmt.Errorf("The specified datastore %s is not a shared datastore across node VMs", datastore) + } else { + // If zone is provided, get the shared datastores in that zone. + klog.V(4).Infof("Validating if datastore %s is in zone %s ", datastore, volumeOptions.Zone) + sharedDsList, err = getDatastoresForZone(ctx, dc, vs.nodeManager, volumeOptions.Zone) + if err != nil { + return "", err + } + // Prepare error msg to be used later, if required. + err = fmt.Errorf("The specified datastore %s does not match the provided availability zones %s", datastore, volumeOptions.Zone) + } + found := false + // Check if the selected datastore belongs to the list of shared datastores computed. 
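For the zone-only path with no storage policy, the provider picks whichever zoned datastore has the most free space. The real helper is `getMostFreeDatastoreName`; the sketch below only illustrates the selection rule over simplified fields:

```go
package main

import "fmt"

type DatastoreInfo struct {
	Name      string
	FreeSpace int64 // bytes
}

// mostFreeDatastore returns the name of the datastore with the largest free space.
func mostFreeDatastore(dsList []DatastoreInfo) (string, error) {
	if len(dsList) == 0 {
		return "", fmt.Errorf("no datastores to choose from")
	}
	best := dsList[0]
	for _, ds := range dsList[1:] {
		if ds.FreeSpace > best.FreeSpace {
			best = ds
		}
	}
	return best.Name, nil
}

func main() {
	name, _ := mostFreeDatastore([]DatastoreInfo{
		{Name: "ds-a", FreeSpace: 10 << 30},
		{Name: "ds-b", FreeSpace: 40 << 30},
	})
	fmt.Println(name) // ds-b
}
```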
+ for _, sharedDs := range sharedDsList { + if datastore == sharedDs.Info.Name { + klog.V(4).Infof("Datastore validation succeeded") + found = true + break + } + } + if !found { + klog.Error(err) + return "", err } - } - if !found { - msg := fmt.Sprintf("The specified datastore %s is not a shared datastore across node VMs", datastore) - return "", errors.New(msg) } } ds, err := dc.GetDatastoreByName(ctx, datastore) @@ -1392,12 +1495,12 @@ func (vs *VSphere) GetZone(ctx context.Context) (cloudprovider.Zone, error) { } if zone.Region == "" { - if vs.cfg.Labels.Region != "" { + if vs.cfg.Labels.Region != DefaultRegionCategoryName { return fmt.Errorf("vSphere region category %q does not match any tags for node %s [%s]", vs.cfg.Labels.Region, nodeName, vs.vmUUID) } } if zone.FailureDomain == "" { - if vs.cfg.Labels.Zone != "" { + if vs.cfg.Labels.Zone != DefaultZoneCategoryName { return fmt.Errorf("vSphere zone category %q does not match any tags for node %s [%s]", vs.cfg.Labels.Zone, nodeName, vs.vmUUID) } } @@ -1418,3 +1521,246 @@ func (vs *VSphere) GetZoneByNodeName(ctx context.Context, nodeName k8stypes.Node func (vs *VSphere) GetZoneByProviderID(ctx context.Context, providerID string) (cloudprovider.Zone, error) { return cloudprovider.Zone{}, cloudprovider.NotImplemented } + +func (vs *VSphere) GetVolumeLabels(volumePath string) (map[string]string, error) { + // Create context + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + datastorePathObj, err := vclib.GetDatastorePathObjFromVMDiskPath(volumePath) + if err != nil { + klog.Errorf("Failed to get datastore for volume: %v: %+v", volumePath, err) + return nil, err + } + dsZones, err := vs.GetZonesForDatastore(ctx, datastorePathObj.Datastore) + if err != nil { + klog.Errorf("Failed to get zones for datastore %v: %+v", datastorePathObj.Datastore, err) + return nil, err + } + dsZones, err = vs.collapseZonesInRegion(ctx, dsZones) + // FIXME: For now, pick the first zone of datastore as the zone of volume + labels := make(map[string]string) + if len(dsZones) > 0 { + labels[kubeletapis.LabelZoneRegion] = dsZones[0].Region + labels[kubeletapis.LabelZoneFailureDomain] = dsZones[0].FailureDomain + } + return labels, nil +} + +// collapse all zones in same region. Join FailureDomain with well known separator +func (vs *VSphere) collapseZonesInRegion(ctx context.Context, zones []cloudprovider.Zone) ([]cloudprovider.Zone, error) { + // first create a map of region -> list of zones in that region + regionToZones := make(map[string][]string) + for _, zone := range zones { + fds, exists := regionToZones[zone.Region] + if !exists { + fds = make([]string, 0) + } + regionToZones[zone.Region] = append(fds, zone.FailureDomain) + } + + // Join all fds in same region and return Zone instances + collapsedZones := make([]cloudprovider.Zone, 0) + for region, fds := range regionToZones { + fdSet := sets.NewString(fds...) 
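`collapseZonesInRegion` above groups failure domains by region and joins each group into a single label value via `volumeutil.ZonesSetToLabelValue`. A sketch of the same grouping; the `__` separator below is only illustrative of that well-known-separator idea, not a claim about the helper's exact output:

```go
package main

import (
	"fmt"
	"sort"
	"strings"
)

type Zone struct{ FailureDomain, Region string }

// collapseZonesInRegion merges all failure domains of a region into one Zone entry.
func collapseZonesInRegion(zones []Zone) []Zone {
	regionToFDs := make(map[string][]string)
	for _, z := range zones {
		regionToFDs[z.Region] = append(regionToFDs[z.Region], z.FailureDomain)
	}
	collapsed := make([]Zone, 0, len(regionToFDs))
	for region, fds := range regionToFDs {
		sort.Strings(fds) // deterministic label value
		collapsed = append(collapsed, Zone{FailureDomain: strings.Join(fds, "__"), Region: region})
	}
	return collapsed
}

func main() {
	fmt.Println(collapseZonesInRegion([]Zone{
		{"zone-a", "region-1"}, {"zone-b", "region-1"}, {"zone-c", "region-2"},
	}))
}
```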
+ appendedZone := volumeutil.ZonesSetToLabelValue(fdSet) + collapsedZones = append(collapsedZones, cloudprovider.Zone{FailureDomain: appendedZone, Region: region}) + } + return collapsedZones, nil +} + +func (vs *VSphere) GetZonesForDatastore(ctx context.Context, datastore string) ([]cloudprovider.Zone, error) { + vsi, err := vs.getVSphereInstanceForServer(vs.cfg.Workspace.VCenterIP, ctx) + if err != nil { + klog.Errorf("Failed to get vSphere instance: %+v", err) + return nil, err + } + dc, err := vclib.GetDatacenter(ctx, vsi.conn, vs.cfg.Workspace.Datacenter) + if err != nil { + klog.Errorf("Failed to get datacenter: %+v", err) + return nil, err + } + // get the hosts mounted on this datastore + // datastore -> ["host-1", "host-2", "host-3", ...] + ds, err := dc.GetDatastoreByName(ctx, datastore) + if err != nil { + klog.Errorf("Failed to get datastore by name: %v: %+v", datastore, err) + return nil, err + } + dsHosts, err := ds.GetDatastoreHostMounts(ctx) + if err != nil { + klog.Errorf("Failed to get datastore host mounts for %v: %+v", datastore, err) + return nil, err + } + klog.V(4).Infof("Got host mounts for datastore: %v: %v", datastore, dsHosts) + + // compute map of zone to list of hosts in that zone across all hosts in vsphere + // zone -> ["host-i", "host-j", "host-k", ...] + zoneToHosts, err := vs.GetZoneToHosts(ctx, vsi) + if err != nil { + klog.Errorf("Failed to get zones for hosts: %+v", err) + return nil, err + } + klog.V(4).Infof("Got zone to hosts: %v", zoneToHosts) + + // datastore belongs to a zone if all hosts in that zone mount that datastore + dsZones := make([]cloudprovider.Zone, 0) + for zone, zoneHosts := range zoneToHosts { + // if zone is valid and zoneHosts is a subset of dsHosts, then add zone + if zone.Region != "" && containsAll(dsHosts, zoneHosts) { + dsZones = append(dsZones, zone) + } + } + klog.V(4).Infof("Datastore %s belongs to zones: %v", datastore, dsZones) + return dsZones, nil +} + +// get a map of 'zone' -> 'list of hosts in that zone' in all of given VC +func (vs *VSphere) GetZoneToHosts(ctx context.Context, vsi *VSphereInstance) (map[cloudprovider.Zone][]vmwaretypes.ManagedObjectReference, error) { + // Approach is to find tags with the category of 'vs.cfg.Labels.Zone' + zoneToHosts := make(map[cloudprovider.Zone][]vmwaretypes.ManagedObjectReference) + + getHostsInTagCategory := func(ctx context.Context, tagCategoryName string) (map[vmwaretypes.ManagedObjectReference]string, error) { + + hostToTag := make(map[vmwaretypes.ManagedObjectReference]string) + err := withTagsClient(ctx, vsi.conn, func(c *rest.Client) error { + // Look whether the zone/region tag is defined in VC + tagManager := tags.NewManager(c) + tagsForCat, err := tagManager.GetTagsForCategory(ctx, tagCategoryName) + if err != nil { + klog.V(4).Infof("No tags with category %s exists in VC. So ignoring.", tagCategoryName) + // return a special error so that tag unavailability can be ignored + return NoZoneTagInVC + } + klog.V(4).Infof("List of tags under category %s: %v", tagCategoryName, tagsForCat) + + // Each such tag is a different 'zone' marked in vCenter. + // Query for objects associated with each tag. Consider Host, Cluster and Datacenter kind of objects. 
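`GetZonesForDatastore` above attributes a datastore to a zone only when every host of that zone appears among the datastore's host mounts, which is what `containsAll` checks. A sketch of that subset test over plain strings:

```go
package main

import "fmt"

// containsAll reports whether every element of want is present in have.
func containsAll(have, want []string) bool {
	set := make(map[string]struct{}, len(have))
	for _, h := range have {
		set[h] = struct{}{}
	}
	for _, w := range want {
		if _, ok := set[w]; !ok {
			return false
		}
	}
	return true
}

func main() {
	dsHosts := []string{"host-1", "host-2", "host-3"} // hosts mounting the datastore
	zoneHosts := []string{"host-1", "host-3"}         // hosts in the zone
	fmt.Println(containsAll(dsHosts, zoneHosts))      // true: the zone claims the datastore
}
```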
+ tagToObjects := make(map[string][]mo.Reference) + for _, tag := range tagsForCat { + klog.V(4).Infof("Getting objects associated with tag %s", tag.Name) + objects, err := tagManager.ListAttachedObjects(ctx, tag.Name) + if err != nil { + klog.Errorf("Error fetching objects associated with zone tag %s: %+v", tag.Name, err) + return err + } + tagToObjects[tag.Name] = objects + } + klog.V(4).Infof("Map of tag to objects: %v", tagToObjects) + + // Infer zone for hosts within Datacenter, hosts within clusters and hosts - in this order of increasing priority + // The below nested for-loops goes over all the objects in tagToObjects three times over. + for _, moType := range []string{vclib.DatacenterType, vclib.ClusterComputeResourceType, vclib.HostSystemType} { + for tagName, objects := range tagToObjects { + for _, obj := range objects { + if obj.Reference().Type == moType { + klog.V(4).Infof("Found zone tag %s associated with %s of type %T: %s", tagName, obj, obj, obj.Reference().Value) + switch moType { + case "Datacenter": + // mark that all hosts in this datacenter has tag applied + dcObjRef := object.NewReference(vsi.conn.Client, obj.Reference()) + klog.V(4).Infof("Converted mo obj %v to govmomi object ref %v", obj, dcObjRef) + dcObj, ok := dcObjRef.(*object.Datacenter) + if !ok { + errMsg := fmt.Sprintf("Not able to convert object to Datacenter %v", obj) + klog.Errorf(errMsg) + return errors.New(errMsg) + } + klog.V(4).Infof("Converted to object Datacenter %v", dcObj) + dc := vclib.Datacenter{dcObj} + hosts, err := dc.GetAllHosts(ctx) + if err != nil { + klog.Errorf("Could not get hosts from datacenter %v: %+v", dc, err) + return err + } + for _, host := range hosts { + hostToTag[host] = tagName + } + case "ClusterComputeResource": + // mark that all hosts in this cluster has tag applied + clusterObjRef := object.NewReference(vsi.conn.Client, obj.Reference()) + clusterObj, ok := clusterObjRef.(*object.ClusterComputeResource) + if !ok { + errMsg := fmt.Sprintf("Not able to convert object ClusterComputeResource %v", obj) + klog.Errorf(errMsg) + return errors.New(errMsg) + } + hostSystemList, err := clusterObj.Hosts(ctx) + if err != nil { + klog.Errorf("Not able to get hosts in cluster %v: %+v", clusterObj, err) + return err + } + for _, host := range hostSystemList { + hostToTag[host.Reference()] = tagName + } + case "HostSystem": + // mark that this host has tag applied + hostToTag[obj.Reference()] = tagName + } + } + } + } + } + return nil // no error + }) + if err != nil { + klog.Errorf("Error processing tag category %s: %+v", tagCategoryName, err) + return nil, err + } + klog.V(6).Infof("Computed hostToTag: %v", hostToTag) + return hostToTag, nil + } + + hostToZone, err := getHostsInTagCategory(ctx, vs.cfg.Labels.Zone) + if err != nil { + if err == NoZoneTagInVC { + return zoneToHosts, nil + } + klog.Errorf("Get hosts in tag category %s failed: %+v", vs.cfg.Labels.Zone, err) + return nil, err + } + + hostToRegion, err := getHostsInTagCategory(ctx, vs.cfg.Labels.Region) + if err != nil { + if err == NoZoneTagInVC { + return zoneToHosts, nil + } + klog.Errorf("Get hosts in tag category %s failed: %+v", vs.cfg.Labels.Region, err) + return nil, err + } + + // populate zoneToHosts based on hostToZone and hostToRegion + klog.V(6).Infof("hostToZone: %v", hostToZone) + klog.V(6).Infof("hostToRegion: %v", hostToRegion) + for host, zone := range hostToZone { + region, regionExists := hostToRegion[host] + if !regionExists { + klog.Errorf("Host %s has a zone, but no region. 
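`GetZoneToHosts` above resolves tag precedence by walking object types in increasing specificity (Datacenter, then ClusterComputeResource, then HostSystem) and letting each later pass overwrite entries in the host to tag map. A self-contained sketch of that precedence loop with hard-coded sample data:

```go
package main

import "fmt"

type taggedObject struct {
	objType string   // "Datacenter", "ClusterComputeResource" or "HostSystem"
	hosts   []string // hosts covered by that object
}

func main() {
	tagToObjects := map[string][]taggedObject{
		"zone-a": {{objType: "Datacenter", hosts: []string{"host-1", "host-2", "host-3"}}},
		"zone-b": {{objType: "HostSystem", hosts: []string{"host-3"}}},
	}
	hostToTag := make(map[string]string)
	// Increasing precedence: a host-level tag wins over a cluster- or datacenter-level one.
	for _, moType := range []string{"Datacenter", "ClusterComputeResource", "HostSystem"} {
		for tag, objects := range tagToObjects {
			for _, obj := range objects {
				if obj.objType != moType {
					continue
				}
				for _, h := range obj.hosts {
					hostToTag[h] = tag
				}
			}
		}
	}
	fmt.Println(hostToTag) // host-3 ends up in zone-b, the rest in zone-a
}
```

Because the host-level pass runs last, a tag attached directly to a host always wins over one inherited from its cluster or datacenter.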
So ignoring.", host) + continue + } + cpZone := cloudprovider.Zone{FailureDomain: zone, Region: region} + hosts, exists := zoneToHosts[cpZone] + if !exists { + hosts = make([]vmwaretypes.ManagedObjectReference, 0) + } + zoneToHosts[cpZone] = append(hosts, host) + } + klog.V(4).Infof("Final zoneToHosts: %v", zoneToHosts) + return zoneToHosts, nil +} + +// returns true if s1 contains all elements from s2; false otherwise +func containsAll(s1 []vmwaretypes.ManagedObjectReference, s2 []vmwaretypes.ManagedObjectReference) bool { + // put all elements of s1 into a map + s1Map := make(map[vmwaretypes.ManagedObjectReference]bool) + for _, mor := range s1 { + s1Map[mor] = true + } + // verify if all elements of s2 are present in s1Map + for _, mor := range s2 { + if _, found := s1Map[mor]; !found { + return false + } + } + return true +} diff --git a/pkg/cloudprovider/providers/vsphere/vsphere_util.go b/pkg/cloudprovider/providers/vsphere/vsphere_util.go index 04f241a89d398..9b3e400b2c6f7 100644 --- a/pkg/cloudprovider/providers/vsphere/vsphere_util.go +++ b/pkg/cloudprovider/providers/vsphere/vsphere_util.go @@ -27,8 +27,12 @@ import ( "strings" "time" + "github.com/vmware/govmomi/object" + "github.com/vmware/govmomi/property" "github.com/vmware/govmomi/vim25" "github.com/vmware/govmomi/vim25/mo" + "github.com/vmware/govmomi/vim25/types" + cloudprovider "k8s.io/cloud-provider" "k8s.io/klog" "k8s.io/api/core/v1" @@ -245,6 +249,97 @@ func getPbmCompatibleDatastore(ctx context.Context, dc *vclib.Datacenter, storag return datastore, err } +func getDatastoresForZone(ctx context.Context, dc *vclib.Datacenter, nodeManager *NodeManager, selectedZones []string) ([]*vclib.DatastoreInfo, error) { + + var sharedDatastores []*vclib.DatastoreInfo + + // Create cloudprovider.Zone type from selectedZones + var cpZones []cloudprovider.Zone + for _, selectedZone := range selectedZones { + var cpZone cloudprovider.Zone + cpZone.FailureDomain = selectedZone + cpZones = append(cpZones, cpZone) + } + + for _, cpZone := range cpZones { + var sharedDatastoresPerZone []*vclib.DatastoreInfo + hosts, err := nodeManager.GetHostsInZone(ctx, cpZone) + if err != nil { + return nil, err + } + klog.V(4).Infof("Hosts in zone %s : %s", cpZone.FailureDomain, hosts) + + for _, host := range hosts { + var hostSystemMo mo.HostSystem + host.Properties(ctx, host.Reference(), []string{"datastore"}, &hostSystemMo) + klog.V(4).Infof("Datastores mounted on host %s : %s", hostSystemMo, hostSystemMo.Datastore) + var dsRefList []types.ManagedObjectReference + for _, dsRef := range hostSystemMo.Datastore { + dsRefList = append(dsRefList, dsRef) + } + + var dsMoList []mo.Datastore + pc := property.DefaultCollector(host.Client()) + properties := []string{DatastoreInfoProperty} + err = pc.Retrieve(ctx, dsRefList, properties, &dsMoList) + if err != nil { + klog.Errorf("Failed to get Datastore managed objects from datastore objects."+ + " dsObjList: %+v, properties: %+v, err: %v", dsRefList, properties, err) + return nil, err + } + klog.V(7).Infof("Datastore mo details: %+v", dsMoList) + + var dsObjList []*vclib.DatastoreInfo + for _, dsMo := range dsMoList { + dsObjList = append(dsObjList, + &vclib.DatastoreInfo{ + &vclib.Datastore{object.NewDatastore(host.Client(), dsMo.Reference()), + nil}, + dsMo.Info.GetDatastoreInfo()}) + } + + klog.V(7).Infof("DatastoreInfo details : %s", dsObjList) + + if len(sharedDatastoresPerZone) == 0 { + sharedDatastoresPerZone = dsObjList + } else { + sharedDatastoresPerZone 
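Once the zone and region categories have each been resolved to a host to tag map, the two maps are joined on the host key, and hosts that carry a zone tag but no region tag are skipped. A sketch of that join:

```go
package main

import "fmt"

type Zone struct{ FailureDomain, Region string }

func main() {
	hostToZone := map[string]string{"host-1": "zone-a", "host-2": "zone-b", "host-3": "zone-a"}
	hostToRegion := map[string]string{"host-1": "region-1", "host-3": "region-1"} // host-2 has no region tag

	zoneToHosts := make(map[Zone][]string)
	for host, fd := range hostToZone {
		region, ok := hostToRegion[host]
		if !ok {
			// Mirrors the provider: a host with a zone but no region is ignored.
			continue
		}
		z := Zone{FailureDomain: fd, Region: region}
		zoneToHosts[z] = append(zoneToHosts[z], host)
	}
	fmt.Println(zoneToHosts)
}
```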
= intersect(sharedDatastoresPerZone, dsObjList) + } + klog.V(7).Infof("Shared datastore list now : %s", sharedDatastoresPerZone) + } + klog.V(4).Infof("Shared datastore per zone %s is %s", cpZone, sharedDatastoresPerZone) + sharedDatastores = append(sharedDatastores, sharedDatastoresPerZone...) + } + klog.V(1).Infof("Returning selected datastores : %s", sharedDatastores) + return sharedDatastores, nil +} + +func getPbmCompatibleZonedDatastore(ctx context.Context, dc *vclib.Datacenter, storagePolicyName string, zonedDatastores []*vclib.DatastoreInfo) (string, error) { + pbmClient, err := vclib.NewPbmClient(ctx, dc.Client()) + if err != nil { + return "", err + } + storagePolicyID, err := pbmClient.ProfileIDByName(ctx, storagePolicyName) + if err != nil { + klog.Errorf("Failed to get Profile ID by name: %s. err: %+v", storagePolicyName, err) + return "", err + } + compatibleDatastores, _, err := pbmClient.GetCompatibleDatastores(ctx, dc, storagePolicyID, zonedDatastores) + if err != nil { + klog.Errorf("Failed to get compatible datastores from datastores : %+v with storagePolicy: %s. err: %+v", + zonedDatastores, storagePolicyID, err) + return "", err + } + klog.V(9).Infof("compatibleDatastores : %+v", compatibleDatastores) + datastore, err := getMostFreeDatastoreName(ctx, dc.Client(), compatibleDatastores) + if err != nil { + klog.Errorf("Failed to get most free datastore from compatible datastores: %+v. err: %+v", compatibleDatastores, err) + return "", err + } + klog.V(4).Infof("Most free datastore : %+s", datastore) + return datastore, err +} + func (vs *VSphere) setVMOptions(ctx context.Context, dc *vclib.Datacenter, resourcePoolPath string) (*vclib.VMOptions, error) { var vmOptions vclib.VMOptions resourcePool, err := dc.GetResourcePool(ctx, resourcePoolPath) diff --git a/pkg/volume/vsphere_volume/vsphere_volume.go b/pkg/volume/vsphere_volume/vsphere_volume.go index 7097005ef9e16..59d8dea2c17bf 100644 --- a/pkg/volume/vsphere_volume/vsphere_volume.go +++ b/pkg/volume/vsphere_volume/vsphere_volume.go @@ -161,7 +161,7 @@ func (plugin *vsphereVolumePlugin) ConstructVolumeSpec(volumeName, mountPath str // Abstract interface to disk operations. 
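`getDatastoresForZone` above keeps a running intersection of the per-host datastore lists, so only datastores visible from every host in the zone survive. The provider's own `intersect` helper works on `DatastoreInfo` values; this sketch shows the same idea over datastore names:

```go
package main

import "fmt"

// intersect returns the elements of b that also appear in a, preserving b's order.
func intersect(a, b []string) []string {
	inA := make(map[string]struct{}, len(a))
	for _, name := range a {
		inA[name] = struct{}{}
	}
	var out []string
	for _, name := range b {
		if _, ok := inA[name]; ok {
			out = append(out, name)
		}
	}
	return out
}

func main() {
	perHost := [][]string{
		{"vsanDatastore", "sharedVmfs-0", "local-1"},
		{"vsanDatastore", "sharedVmfs-0", "local-2"},
		{"sharedVmfs-0", "vsanDatastore"},
	}
	shared := perHost[0]
	for _, dsList := range perHost[1:] {
		shared = intersect(shared, dsList)
	}
	fmt.Println(shared) // datastores mounted on every host in the zone
}
```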
type vdManager interface { // Creates a volume - CreateVolume(provisioner *vsphereVolumeProvisioner) (volSpec *VolumeSpec, err error) + CreateVolume(provisioner *vsphereVolumeProvisioner, selectedZone []string) (volSpec *VolumeSpec, err error) // Deletes a volume DeleteVolume(deleter *vsphereVolumeDeleter) error } @@ -358,8 +358,14 @@ func (v *vsphereVolumeProvisioner) Provision(selectedNode *v1.Node, allowedTopol if !util.AccessModesContainedInAll(v.plugin.GetAccessModes(), v.options.PVC.Spec.AccessModes) { return nil, fmt.Errorf("invalid AccessModes %v: only AccessModes %v are supported", v.options.PVC.Spec.AccessModes, v.plugin.GetAccessModes()) } + klog.V(1).Infof("Provision with allowedTopologies : %s", allowedTopologies) + selectedZones, err := util.ZonesFromAllowedTopologies(allowedTopologies) + if err != nil { + return nil, err + } - volSpec, err := v.manager.CreateVolume(v) + klog.V(4).Infof("Selected zones for volume : %s", selectedZones) + volSpec, err := v.manager.CreateVolume(v, selectedZones.List()) if err != nil { return nil, err } diff --git a/pkg/volume/vsphere_volume/vsphere_volume_util.go b/pkg/volume/vsphere_volume/vsphere_volume_util.go index a5880ea7fbf88..f3f844548dcbf 100644 --- a/pkg/volume/vsphere_volume/vsphere_volume_util.go +++ b/pkg/volume/vsphere_volume/vsphere_volume_util.go @@ -83,7 +83,7 @@ func verifyDevicePath(path string) (string, error) { } // CreateVolume creates a vSphere volume. -func (util *VsphereDiskUtil) CreateVolume(v *vsphereVolumeProvisioner) (volSpec *VolumeSpec, err error) { +func (util *VsphereDiskUtil) CreateVolume(v *vsphereVolumeProvisioner, selectedZone []string) (volSpec *VolumeSpec, err error) { var fstype string cloud, err := getCloudProvider(v.plugin.host.GetCloudProvider()) if err != nil { @@ -104,6 +104,7 @@ func (util *VsphereDiskUtil) CreateVolume(v *vsphereVolumeProvisioner) (volSpec Name: name, } + volumeOptions.Zone = selectedZone // Apply Parameters (case-insensitive). We leave validation of // the values to the cloud provider. 
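`Provision` above derives the selected zones from the StorageClass `allowedTopologies` via `volumeutil.ZonesFromAllowedTopologies` and threads them through `CreateVolume`. The sketch below inlines the same extraction over `v1.TopologySelectorTerm`, assuming the `k8s.io/api` dependency is available; the zone label constant is written out literally here rather than imported:

```go
package example

import (
	v1 "k8s.io/api/core/v1"
)

// kubeletapis.LabelZoneFailureDomain at the time of this PR.
const zoneLabel = "failure-domain.beta.kubernetes.io/zone"

// zonesFromTopologies flattens allowedTopologies into the list of selected zone names.
func zonesFromTopologies(terms []v1.TopologySelectorTerm) []string {
	var zones []string
	for _, term := range terms {
		for _, req := range term.MatchLabelExpressions {
			if req.Key == zoneLabel {
				zones = append(zones, req.Values...)
			}
		}
	}
	return zones
}
```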
for parameter, value := range v.options.Parameters { diff --git a/plugin/pkg/admission/storage/persistentvolume/label/admission.go b/plugin/pkg/admission/storage/persistentvolume/label/admission.go index 4125dd52c3522..a8da2b6e6ddcc 100644 --- a/plugin/pkg/admission/storage/persistentvolume/label/admission.go +++ b/plugin/pkg/admission/storage/persistentvolume/label/admission.go @@ -29,6 +29,7 @@ import ( "k8s.io/kubernetes/pkg/cloudprovider/providers/aws" "k8s.io/kubernetes/pkg/cloudprovider/providers/azure" "k8s.io/kubernetes/pkg/cloudprovider/providers/gce" + "k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere" kubeapiserveradmission "k8s.io/kubernetes/pkg/kubeapiserver/admission" kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis" vol "k8s.io/kubernetes/pkg/volume" @@ -58,6 +59,7 @@ type persistentVolumeLabel struct { cloudConfig []byte gceCloudProvider *gce.Cloud azureProvider *azure.Cloud + vsphereProvider *vsphere.VSphere } var _ admission.MutationInterface = &persistentVolumeLabel{} @@ -130,6 +132,13 @@ func (l *persistentVolumeLabel) Admit(a admission.Attributes) (err error) { } volumeLabels = labels } + if volume.Spec.VsphereVolume != nil { + labels, err := l.findVsphereVolumeLabels(volume) + if err != nil { + return admission.NewForbidden(a, fmt.Errorf("error querying vSphere Volume %s: %v", volume.Spec.VsphereVolume.VolumePath, err)) + } + volumeLabels = labels + } requirements := make([]api.NodeSelectorRequirement, 0) if len(volumeLabels) != 0 { @@ -181,6 +190,46 @@ func (l *persistentVolumeLabel) Admit(a admission.Attributes) (err error) { return nil } +func (l *persistentVolumeLabel) findVsphereVolumeLabels(volume *api.PersistentVolume) (map[string]string, error) { + vsphereProvider, err := l.getVsphereProvider() + if err != nil { + return nil, err + } + if vsphereProvider == nil { + return nil, fmt.Errorf("unable to build vSphere cloud provider") + } + + labels, err := vsphereProvider.GetVolumeLabels(volume.Spec.VsphereVolume.VolumePath) + if err != nil { + return nil, err + } + + return labels, nil +} + +func (l *persistentVolumeLabel) getVsphereProvider() (*vsphere.VSphere, error) { + l.mutex.Lock() + defer l.mutex.Unlock() + + if l.vsphereProvider == nil { + var cloudConfigReader io.Reader + if len(l.cloudConfig) > 0 { + cloudConfigReader = bytes.NewReader(l.cloudConfig) + } + cloudProvider, err := cloudprovider.GetCloudProvider("vsphere", cloudConfigReader) + if err != nil || cloudProvider == nil { + return nil, err + } + vsphereCloudProvider, ok := cloudProvider.(*vsphere.VSphere) + if !ok { + // GetCloudProvider failed + return nil, fmt.Errorf("error retrieving vSphere Cloud Provider") + } + l.vsphereProvider = vsphereCloudProvider + } + return l.vsphereProvider, nil +} + func (l *persistentVolumeLabel) findAWSEBSLabels(volume *api.PersistentVolume) (map[string]string, error) { // Ignore any volumes that are being provisioned if volume.Spec.AWSElasticBlockStore.VolumeID == vol.ProvisionedVolumeName { diff --git a/test/e2e/storage/vsphere/BUILD b/test/e2e/storage/vsphere/BUILD index 3c23b80976ff9..b5025901ff477 100644 --- a/test/e2e/storage/vsphere/BUILD +++ b/test/e2e/storage/vsphere/BUILD @@ -35,6 +35,7 @@ go_library( "vsphere_volume_placement.go", "vsphere_volume_vpxd_restart.go", "vsphere_volume_vsan_policy.go", + "vsphere_zone_support.go", ], importpath = "k8s.io/kubernetes/test/e2e/storage/vsphere", deps = [ diff --git a/test/e2e/storage/vsphere/bootstrap.go b/test/e2e/storage/vsphere/bootstrap.go index c556af5e6ac6b..fc9f2fccf8d63 100644 --- 
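The admission plugin above lazily builds and caches the vSphere provider under a mutex, the same shape it already uses for the other cloud providers. A reduced sketch of that lazy-initialization pattern; `Provider` and `buildProvider` are placeholder names, not the plugin's real identifiers:

```go
package main

import (
	"fmt"
	"sync"
)

type Provider struct{ name string }

func buildProvider() (*Provider, error) { return &Provider{name: "vsphere"}, nil }

type labeler struct {
	mu       sync.Mutex
	provider *Provider
}

// getProvider builds the provider on first use and reuses it afterwards.
func (l *labeler) getProvider() (*Provider, error) {
	l.mu.Lock()
	defer l.mu.Unlock()
	if l.provider == nil {
		p, err := buildProvider()
		if err != nil {
			return nil, err
		}
		l.provider = p
	}
	return l.provider, nil
}

func main() {
	l := &labeler{}
	p, _ := l.getProvider()
	fmt.Println(p.name)
}
```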
a/test/e2e/storage/vsphere/bootstrap.go +++ b/test/e2e/storage/vsphere/bootstrap.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Kubernetes Authors. +Copyright 2019 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -55,5 +55,10 @@ func bootstrapOnce() { if err != nil { framework.Failf("Failed to bootstrap vSphere with error: %v", err) } + // 4. Generate Zone to Datastore mapping + err = TestContext.NodeMapper.GenerateZoneToDatastoreMap() + if err != nil { + framework.Failf("Failed to generate zone to datastore mapping with error: %v", err) + } close(waiting) } diff --git a/test/e2e/storage/vsphere/nodemapper.go b/test/e2e/storage/vsphere/nodemapper.go index 8812212855300..b0e7c6ae49408 100644 --- a/test/e2e/storage/vsphere/nodemapper.go +++ b/test/e2e/storage/vsphere/nodemapper.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Kubernetes Authors. +Copyright 2019 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -23,9 +23,14 @@ import ( "sync" "github.com/vmware/govmomi/object" + "github.com/vmware/govmomi/vapi/rest" + "github.com/vmware/govmomi/vapi/tags" + "github.com/vmware/govmomi/vim25/mo" "github.com/vmware/govmomi/vim25/types" "k8s.io/api/core/v1" "k8s.io/kubernetes/test/e2e/framework" + + neturl "net/url" ) type NodeMapper struct { @@ -35,11 +40,20 @@ type NodeInfo struct { Name string DataCenterRef types.ManagedObjectReference VirtualMachineRef types.ManagedObjectReference + HostSystemRef types.ManagedObjectReference VSphere *VSphere + Zones []string } +const ( + DatacenterType = "Datacenter" + ClusterComputeResourceType = "ClusterComputeResource" + HostSystemType = "HostSystem" +) + var ( - nameToNodeInfo = make(map[string]*NodeInfo) + nameToNodeInfo = make(map[string]*NodeInfo) + vcToZoneDatastoresMap = make(map[string](map[string][]string)) ) // GenerateNodeMap populates node name to node info map @@ -104,9 +118,11 @@ func (nm *NodeMapper) GenerateNodeMap(vSphereInstances map[string]*VSphere, node continue } if vm != nil { - framework.Logf("Found node %s as vm=%+v in vc=%s and datacenter=%s", - n.Name, vm, res.vs.Config.Hostname, res.datacenter.Name()) - nodeInfo := &NodeInfo{Name: n.Name, DataCenterRef: res.datacenter.Reference(), VirtualMachineRef: vm.Reference(), VSphere: res.vs} + hostSystemRef := res.vs.GetHostFromVMReference(ctx, vm.Reference()) + zones := retrieveZoneInformationForNode(n.Name, res.vs, hostSystemRef) + framework.Logf("Found node %s as vm=%+v placed on host=%+v under zones %s in vc=%s and datacenter=%s", + n.Name, vm, hostSystemRef, zones, res.vs.Config.Hostname, res.datacenter.Name()) + nodeInfo := &NodeInfo{Name: n.Name, DataCenterRef: res.datacenter.Reference(), VirtualMachineRef: vm.Reference(), HostSystemRef: hostSystemRef, VSphere: res.vs, Zones: zones} nm.SetNodeInfo(n.Name, nodeInfo) break } @@ -123,6 +139,144 @@ func (nm *NodeMapper) GenerateNodeMap(vSphereInstances map[string]*VSphere, node return nil } +// Establish rest connection to retrieve tag manager stub +func withTagsClient(ctx context.Context, connection *VSphere, f func(c *rest.Client) error) error { + c := rest.NewClient(connection.Client.Client) + user := neturl.UserPassword(connection.Config.Username, connection.Config.Password) + if err := c.Login(ctx, user); err != nil { + return err + } + defer c.Logout(ctx) + return f(c) +} + +// Iterates 
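The node mapper's `withTagsClient` above opens a short-lived vAPI REST session around a tag-manager callback and logs out when the callback returns. A sketch of that wrapper, assuming govmomi and an already-connected `*vim25.Client`; callers would pass credentials via `neturl.UserPassword` as the diff does:

```go
package example

import (
	"context"
	"net/url"

	"github.com/vmware/govmomi/vapi/rest"
	"github.com/vmware/govmomi/vapi/tags"
	"github.com/vmware/govmomi/vim25"
)

// withTagsManager runs f against a tag manager backed by a temporary REST session.
func withTagsManager(ctx context.Context, vc *vim25.Client, user *url.Userinfo, f func(*tags.Manager) error) error {
	c := rest.NewClient(vc)
	if err := c.Login(ctx, user); err != nil {
		return err
	}
	defer c.Logout(ctx)
	return f(tags.NewManager(c))
}
```

Scoping the login/logout to a closure keeps the REST session from leaking even when the callback returns early with an error.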
over each node and retrieves the zones in which they are placed +func retrieveZoneInformationForNode(nodeName string, connection *VSphere, hostSystemRef types.ManagedObjectReference) []string { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + var zones []string + pc := connection.Client.ServiceContent.PropertyCollector + withTagsClient(ctx, connection, func(c *rest.Client) error { + client := tags.NewManager(c) + // Example result: ["Host", "Cluster", "Datacenter"] + ancestors, err := mo.Ancestors(ctx, connection.Client, pc, hostSystemRef) + if err != nil { + return err + } + + var validAncestors []mo.ManagedEntity + // Filter out only Datacenter, ClusterComputeResource and HostSystem type objects. These objects will be + // in the following order ["Datacenter" < "ClusterComputeResource" < "HostSystem"] so that the highest + // zone precedance will be received by the HostSystem type. + for _, ancestor := range ancestors { + moType := ancestor.ExtensibleManagedObject.Self.Type + if moType == DatacenterType || moType == ClusterComputeResourceType || moType == HostSystemType { + validAncestors = append(validAncestors, ancestor) + } + } + + for _, ancestor := range validAncestors { + var zonesAttachedToObject []string + tags, err := client.ListAttachedTags(ctx, ancestor) + if err != nil { + return err + } + for _, value := range tags { + tag, err := client.GetTag(ctx, value) + if err != nil { + return err + } + category, err := client.GetCategory(ctx, tag.CategoryID) + if err != nil { + return err + } + switch { + case category.Name == "k8s-zone": + framework.Logf("Found %s associated with %s for %s", tag.Name, ancestor.Name, nodeName) + zonesAttachedToObject = append(zonesAttachedToObject, tag.Name) + case category.Name == "k8s-region": + framework.Logf("Found %s associated with %s for %s", tag.Name, ancestor.Name, nodeName) + } + } + // Overwrite zone information if it exists for this object + if len(zonesAttachedToObject) != 0 { + zones = zonesAttachedToObject + } + } + return nil + }) + return zones +} + +// Generate zone to datastore mapping for easily verifying volume placement +func (nm *NodeMapper) GenerateZoneToDatastoreMap() error { + // 1. Create zone to hosts map for each VC + var vcToZoneHostsMap = make(map[string](map[string][]string)) + // 2. Create host to datastores map for each VC + var vcToHostDatastoresMap = make(map[string](map[string][]string)) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + // 3. 
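`retrieveZoneInformationForNode` above walks the host's ancestry (Datacenter, then ClusterComputeResource, then HostSystem) and lets each more specific level that carries any k8s-zone tags replace the zones gathered so far. A data-only sketch of that precedence rule:

```go
package main

import "fmt"

type ancestor struct {
	moType string
	zones  []string // k8s-zone tags attached to this object
}

// zonesForHost applies the "more specific ancestor overrides" rule over an ordered ancestry.
func zonesForHost(ancestors []ancestor) []string {
	var zones []string
	for _, a := range ancestors {
		switch a.moType {
		case "Datacenter", "ClusterComputeResource", "HostSystem":
			if len(a.zones) != 0 {
				zones = a.zones // the more specific level overrides the broader one
			}
		}
	}
	return zones
}

func main() {
	fmt.Println(zonesForHost([]ancestor{
		{moType: "Datacenter", zones: []string{"zone-a"}},
		{moType: "ClusterComputeResource", zones: nil},
		{moType: "HostSystem", zones: []string{"zone-b"}},
	})) // [zone-b]
}
```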
Populate vcToZoneHostsMap and vcToHostDatastoresMap + for _, nodeInfo := range nameToNodeInfo { + vc := nodeInfo.VSphere.Config.Hostname + host := nodeInfo.HostSystemRef.Value + for _, zone := range nodeInfo.Zones { + if vcToZoneHostsMap[vc] == nil { + vcToZoneHostsMap[vc] = make(map[string][]string) + } + // Populating vcToZoneHostsMap using the HostSystemRef and Zone fields from each NodeInfo + hosts := vcToZoneHostsMap[vc][zone] + hosts = append(hosts, host) + vcToZoneHostsMap[vc][zone] = hosts + } + if vcToHostDatastoresMap[vc] == nil { + vcToHostDatastoresMap[vc] = make(map[string][]string) + } + datastores := vcToHostDatastoresMap[vc][host] + // Populating vcToHostDatastoresMap by finding out the datastores mounted on node's host + datastoreRefs := nodeInfo.VSphere.GetDatastoresMountedOnHost(ctx, nodeInfo.HostSystemRef) + for _, datastore := range datastoreRefs { + datastores = append(datastores, datastore.Value) + } + vcToHostDatastoresMap[vc][host] = datastores + } + // 4, Populate vcToZoneDatastoresMap from vcToZoneHostsMap and vcToHostDatastoresMap + for vc, zoneToHostsMap := range vcToZoneHostsMap { + for zone, hosts := range zoneToHostsMap { + commonDatastores := retrieveCommonDatastoresAmongHosts(hosts, vcToHostDatastoresMap[vc]) + if vcToZoneDatastoresMap[vc] == nil { + vcToZoneDatastoresMap[vc] = make(map[string][]string) + } + vcToZoneDatastoresMap[vc][zone] = commonDatastores + } + } + framework.Logf("Zone to datastores map : %+v", vcToZoneDatastoresMap) + return nil +} + +// Retrieves the common datastores from the specified hosts +func retrieveCommonDatastoresAmongHosts(hosts []string, hostToDatastoresMap map[string][]string) []string { + var datastoreCountMap = make(map[string]int) + for _, host := range hosts { + for _, datastore := range hostToDatastoresMap[host] { + datastoreCountMap[datastore] = datastoreCountMap[datastore] + 1 + } + } + var commonDatastores []string + numHosts := len(hosts) + for datastore, count := range datastoreCountMap { + if count == numHosts { + commonDatastores = append(commonDatastores, datastore) + } + } + return commonDatastores +} + +// Get all the datastores in the specified zone +func (nm *NodeMapper) GetDatastoresInZone(vc string, zone string) []string { + return vcToZoneDatastoresMap[vc][zone] +} + // GetNodeInfo return NodeInfo for given nodeName func (nm *NodeMapper) GetNodeInfo(nodeName string) *NodeInfo { return nameToNodeInfo[nodeName] diff --git a/test/e2e/storage/vsphere/vsphere.go b/test/e2e/storage/vsphere/vsphere.go index f19a2d5f7aa89..c01da27086215 100644 --- a/test/e2e/storage/vsphere/vsphere.go +++ b/test/e2e/storage/vsphere/vsphere.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Kubernetes Authors. +Copyright 2019 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
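`retrieveCommonDatastoresAmongHosts` above counts datastore occurrences across a zone's hosts and keeps only those whose count equals the number of hosts, i.e. the datastores mounted everywhere in the zone. A sketch of that counting approach:

```go
package main

import "fmt"

// commonDatastores returns datastores mounted on every one of the given hosts.
func commonDatastores(hosts []string, hostToDatastores map[string][]string) []string {
	counts := make(map[string]int)
	for _, h := range hosts {
		for _, ds := range hostToDatastores[h] {
			counts[ds]++
		}
	}
	var common []string
	for ds, n := range counts {
		if n == len(hosts) {
			common = append(common, ds)
		}
	}
	return common
}

func main() {
	hostToDS := map[string][]string{
		"host-1": {"ds-shared", "ds-local-1"},
		"host-2": {"ds-shared", "ds-local-2"},
	}
	fmt.Println(commonDatastores([]string{"host-1", "host-2"}, hostToDS)) // [ds-shared]
}
```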
@@ -27,6 +27,7 @@ import ( "github.com/vmware/govmomi" "github.com/vmware/govmomi/find" "github.com/vmware/govmomi/object" + "github.com/vmware/govmomi/vim25/mo" "github.com/vmware/govmomi/vim25/soap" "github.com/vmware/govmomi/vim25/types" "k8s.io/kubernetes/test/e2e/framework" @@ -84,6 +85,33 @@ func (vs *VSphere) GetVMByUUID(ctx context.Context, vmUUID string, dc object.Ref return s.FindByUuid(ctx, datacenter, vmUUID, true, nil) } +// Get host object reference of the host on which the specified VM resides +func (vs *VSphere) GetHostFromVMReference(ctx context.Context, vm types.ManagedObjectReference) types.ManagedObjectReference { + Connect(ctx, vs) + var vmMo mo.VirtualMachine + vs.Client.RetrieveOne(ctx, vm, []string{"summary.runtime.host"}, &vmMo) + host := *vmMo.Summary.Runtime.Host + return host +} + +// Get the datastore references of all the datastores mounted on the specified host +func (vs *VSphere) GetDatastoresMountedOnHost(ctx context.Context, host types.ManagedObjectReference) []types.ManagedObjectReference { + Connect(ctx, vs) + var hostMo mo.HostSystem + vs.Client.RetrieveOne(ctx, host, []string{"datastore"}, &hostMo) + return hostMo.Datastore +} + +// Get the datastore reference of the specified datastore +func (vs *VSphere) GetDatastoreRefFromName(ctx context.Context, dc object.Reference, datastoreName string) (types.ManagedObjectReference, error) { + Connect(ctx, vs) + datacenter := object.NewDatacenter(vs.Client.Client, dc.Reference()) + finder := find.NewFinder(vs.Client.Client, false) + finder.SetDatacenter(datacenter) + datastore, err := finder.Datastore(ctx, datastoreName) + return datastore.Reference(), err +} + // GetFolderByPath gets the Folder Object Reference from the given folder path // folderPath should be the full path to folder func (vs *VSphere) GetFolderByPath(ctx context.Context, dc object.Reference, folderPath string) (vmFolderMor types.ManagedObjectReference, err error) { diff --git a/test/e2e/storage/vsphere/vsphere_scale.go b/test/e2e/storage/vsphere/vsphere_scale.go index f09249991f87a..36675586b1606 100644 --- a/test/e2e/storage/vsphere/vsphere_scale.go +++ b/test/e2e/storage/vsphere/vsphere_scale.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2019 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -129,7 +129,7 @@ var _ = utils.SIGDescribe("vcp at scale [Feature:vsphere] ", func() { case storageclass4: scParams[Datastore] = datastoreName } - sc, err = client.StorageV1().StorageClasses().Create(getVSphereStorageClassSpec(scname, scParams)) + sc, err = client.StorageV1().StorageClasses().Create(getVSphereStorageClassSpec(scname, scParams, nil)) Expect(sc).NotTo(BeNil(), "Storage class is empty") Expect(err).NotTo(HaveOccurred(), "Failed to create storage class") defer client.StorageV1().StorageClasses().Delete(scname, nil) diff --git a/test/e2e/storage/vsphere/vsphere_statefulsets.go b/test/e2e/storage/vsphere/vsphere_statefulsets.go index fae38ac0bf322..e985a46ea6946 100644 --- a/test/e2e/storage/vsphere/vsphere_statefulsets.go +++ b/test/e2e/storage/vsphere/vsphere_statefulsets.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2019 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
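The test helper `GetDatastoreRefFromName` above scopes a govmomi finder to a datacenter and resolves a datastore name to its managed object reference. A sketch of that lookup, assuming govmomi and an already-connected client; the function name and parameters here are illustrative:

```go
package example

import (
	"context"

	"github.com/vmware/govmomi/find"
	"github.com/vmware/govmomi/object"
	"github.com/vmware/govmomi/vim25"
	"github.com/vmware/govmomi/vim25/types"
)

// datastoreRefByName resolves a datastore name inside the given datacenter.
func datastoreRefByName(ctx context.Context, c *vim25.Client, dcRef types.ManagedObjectReference, name string) (types.ManagedObjectReference, error) {
	dc := object.NewDatacenter(c, dcRef)
	finder := find.NewFinder(c, false)
	finder.SetDatacenter(dc) // limit the search to this datacenter's inventory
	ds, err := finder.Datastore(ctx, name)
	if err != nil {
		return types.ManagedObjectReference{}, err
	}
	return ds.Reference(), nil
}
```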
@@ -70,7 +70,7 @@ var _ = utils.SIGDescribe("vsphere statefulset", func() { By("Creating StorageClass for Statefulset") scParameters := make(map[string]string) scParameters["diskformat"] = "thin" - scSpec := getVSphereStorageClassSpec(storageclassname, scParameters) + scSpec := getVSphereStorageClassSpec(storageclassname, scParameters, nil) sc, err := client.StorageV1().StorageClasses().Create(scSpec) Expect(err).NotTo(HaveOccurred()) defer client.StorageV1().StorageClasses().Delete(sc.Name, nil) diff --git a/test/e2e/storage/vsphere/vsphere_stress.go b/test/e2e/storage/vsphere/vsphere_stress.go index 260fe84039f16..1acd0eadcdc61 100644 --- a/test/e2e/storage/vsphere/vsphere_stress.go +++ b/test/e2e/storage/vsphere/vsphere_stress.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2019 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -85,22 +85,22 @@ var _ = utils.SIGDescribe("vsphere cloud provider stress [Feature:vsphere]", fun var err error switch scname { case storageclass1: - sc, err = client.StorageV1().StorageClasses().Create(getVSphereStorageClassSpec(storageclass1, nil)) + sc, err = client.StorageV1().StorageClasses().Create(getVSphereStorageClassSpec(storageclass1, nil, nil)) case storageclass2: var scVSanParameters map[string]string scVSanParameters = make(map[string]string) scVSanParameters[Policy_HostFailuresToTolerate] = "1" - sc, err = client.StorageV1().StorageClasses().Create(getVSphereStorageClassSpec(storageclass2, scVSanParameters)) + sc, err = client.StorageV1().StorageClasses().Create(getVSphereStorageClassSpec(storageclass2, scVSanParameters, nil)) case storageclass3: var scSPBMPolicyParameters map[string]string scSPBMPolicyParameters = make(map[string]string) scSPBMPolicyParameters[SpbmStoragePolicy] = policyName - sc, err = client.StorageV1().StorageClasses().Create(getVSphereStorageClassSpec(storageclass3, scSPBMPolicyParameters)) + sc, err = client.StorageV1().StorageClasses().Create(getVSphereStorageClassSpec(storageclass3, scSPBMPolicyParameters, nil)) case storageclass4: var scWithDSParameters map[string]string scWithDSParameters = make(map[string]string) scWithDSParameters[Datastore] = datastoreName - scWithDatastoreSpec := getVSphereStorageClassSpec(storageclass4, scWithDSParameters) + scWithDatastoreSpec := getVSphereStorageClassSpec(storageclass4, scWithDSParameters, nil) sc, err = client.StorageV1().StorageClasses().Create(scWithDatastoreSpec) } Expect(sc).NotTo(BeNil()) diff --git a/test/e2e/storage/vsphere/vsphere_utils.go b/test/e2e/storage/vsphere/vsphere_utils.go index 443c3366d8100..4e8aeb120497b 100644 --- a/test/e2e/storage/vsphere/vsphere_utils.go +++ b/test/e2e/storage/vsphere/vsphere_utils.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2019 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
@@ -40,6 +40,7 @@ import ( "k8s.io/apimachinery/pkg/util/uuid" "k8s.io/apimachinery/pkg/util/wait" clientset "k8s.io/client-go/kubernetes" + kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis" "k8s.io/kubernetes/pkg/volume/util" "k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/storage/utils" @@ -235,7 +236,7 @@ func verifyContentOfVSpherePV(client clientset.Interface, pvc *v1.PersistentVolu framework.Logf("Successfully verified content of the volume") } -func getVSphereStorageClassSpec(name string, scParameters map[string]string) *storage.StorageClass { +func getVSphereStorageClassSpec(name string, scParameters map[string]string, zones []string) *storage.StorageClass { var sc *storage.StorageClass sc = &storage.StorageClass{ @@ -250,6 +251,17 @@ func getVSphereStorageClassSpec(name string, scParameters map[string]string) *st if scParameters != nil { sc.Parameters = scParameters } + if zones != nil { + term := v1.TopologySelectorTerm{ + MatchLabelExpressions: []v1.TopologySelectorLabelRequirement{ + { + Key: kubeletapis.LabelZoneFailureDomain, + Values: zones, + }, + }, + } + sc.AllowedTopologies = append(sc.AllowedTopologies, term) + } return sc } @@ -399,6 +411,30 @@ func verifyVSphereVolumesAccessible(c clientset.Interface, pod *v1.Pod, persiste } } +// verify volumes are created on one of the specified zones +func verifyVolumeCreationOnRightZone(persistentvolumes []*v1.PersistentVolume, nodeName string, zones []string) { + for _, pv := range persistentvolumes { + volumePath := pv.Spec.VsphereVolume.VolumePath + // Extract datastoreName from the volume path in the pv spec + // For example : "vsanDatastore" is extracted from "[vsanDatastore] 25d8b159-948c-4b73-e499-02001ad1b044/volume.vmdk" + datastoreName := volumePath[strings.Index(volumePath, "[")+1 : strings.Index(volumePath, "]")] + nodeInfo := TestContext.NodeMapper.GetNodeInfo(nodeName) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + // Get the datastore object reference from the datastore name + datastoreRef, err := nodeInfo.VSphere.GetDatastoreRefFromName(ctx, nodeInfo.DataCenterRef, datastoreName) + if err != nil { + Expect(err).NotTo(HaveOccurred()) + } + var allDatastores []string + for _, zone := range zones { + datastoreInZone := TestContext.NodeMapper.GetDatastoresInZone(nodeInfo.VSphere.Config.Hostname, zone) + allDatastores = append(allDatastores, datastoreInZone...) + } + Expect(allDatastores).To(ContainElement(datastoreRef.Value), "PV was created in an unsupported zone.") + } +} + // Get vSphere Volume Path from PVC func getvSphereVolumePathFromClaim(client clientset.Interface, namespace string, claimName string) string { pvclaim, err := client.CoreV1().PersistentVolumeClaims(namespace).Get(claimName, metav1.GetOptions{}) diff --git a/test/e2e/storage/vsphere/vsphere_volume_datastore.go b/test/e2e/storage/vsphere/vsphere_volume_datastore.go index fcb964b7c171b..313d25895f988 100644 --- a/test/e2e/storage/vsphere/vsphere_volume_datastore.go +++ b/test/e2e/storage/vsphere/vsphere_volume_datastore.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2019 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
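`verifyVolumeCreationOnRightZone` above recovers the datastore name from the volume path's `[datastore]` prefix, e.g. `[vsanDatastore] 25d8b159-.../volume.vmdk`, before checking it against the zone's datastores. A sketch of that extraction with minimal error handling added for illustration:

```go
package main

import (
	"fmt"
	"strings"
)

// datastoreFromVolumePath pulls the datastore name out of a "[datastore] path/disk.vmdk" string.
func datastoreFromVolumePath(volumePath string) (string, error) {
	start := strings.Index(volumePath, "[")
	end := strings.Index(volumePath, "]")
	if start == -1 || end == -1 || end < start {
		return "", fmt.Errorf("volume path %q has no [datastore] prefix", volumePath)
	}
	return volumePath[start+1 : end], nil
}

func main() {
	ds, _ := datastoreFromVolumePath("[vsanDatastore] 25d8b159-948c-4b73-e499-02001ad1b044/volume.vmdk")
	fmt.Println(ds) // vsanDatastore
}
```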
@@ -79,7 +79,7 @@ var _ = utils.SIGDescribe("Volume Provisioning on Datastore [Feature:vsphere]", func invokeInvalidDatastoreTestNeg(client clientset.Interface, namespace string, scParameters map[string]string) error { By("Creating Storage Class With Invalid Datastore") - storageclass, err := client.StorageV1().StorageClasses().Create(getVSphereStorageClassSpec(DatastoreSCName, scParameters)) + storageclass, err := client.StorageV1().StorageClasses().Create(getVSphereStorageClassSpec(DatastoreSCName, scParameters, nil)) Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("Failed to create storage class with err: %v", err)) defer client.StorageV1().StorageClasses().Delete(storageclass.Name, nil) diff --git a/test/e2e/storage/vsphere/vsphere_volume_diskformat.go b/test/e2e/storage/vsphere/vsphere_volume_diskformat.go index ca90fdf09accc..162268b37887a 100644 --- a/test/e2e/storage/vsphere/vsphere_volume_diskformat.go +++ b/test/e2e/storage/vsphere/vsphere_volume_diskformat.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2019 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -106,7 +106,7 @@ func invokeTest(f *framework.Framework, client clientset.Interface, namespace st scParameters["diskformat"] = diskFormat By("Creating Storage Class With DiskFormat") - storageClassSpec := getVSphereStorageClassSpec("thinsc", scParameters) + storageClassSpec := getVSphereStorageClassSpec("thinsc", scParameters, nil) storageclass, err := client.StorageV1().StorageClasses().Create(storageClassSpec) Expect(err).NotTo(HaveOccurred()) diff --git a/test/e2e/storage/vsphere/vsphere_volume_disksize.go b/test/e2e/storage/vsphere/vsphere_volume_disksize.go index 5e58a81bfc3ee..e97d43055f17b 100644 --- a/test/e2e/storage/vsphere/vsphere_volume_disksize.go +++ b/test/e2e/storage/vsphere/vsphere_volume_disksize.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2019 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -77,7 +77,7 @@ var _ = utils.SIGDescribe("Volume Disk Size [Feature:vsphere]", func() { func invokeInvalidDiskSizeTestNeg(client clientset.Interface, namespace string, scParameters map[string]string, diskSize string) error { By("Creating Storage Class With invalid disk size") - storageclass, err := client.StorageV1().StorageClasses().Create(getVSphereStorageClassSpec(DiskSizeSCName, scParameters)) + storageclass, err := client.StorageV1().StorageClasses().Create(getVSphereStorageClassSpec(DiskSizeSCName, scParameters, nil)) Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("Failed to create storage class with err: %v", err)) defer client.StorageV1().StorageClasses().Delete(storageclass.Name, nil) diff --git a/test/e2e/storage/vsphere/vsphere_volume_fstype.go b/test/e2e/storage/vsphere/vsphere_volume_fstype.go index 74ae2a34fe07a..7861d8d685d67 100644 --- a/test/e2e/storage/vsphere/vsphere_volume_fstype.go +++ b/test/e2e/storage/vsphere/vsphere_volume_fstype.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2019 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
@@ -146,7 +146,7 @@ func invokeTestForInvalidFstype(f *framework.Framework, client clientset.Interfa } func createVolume(client clientset.Interface, namespace string, scParameters map[string]string) (*v1.PersistentVolumeClaim, []*v1.PersistentVolume) { - storageclass, err := client.StorageV1().StorageClasses().Create(getVSphereStorageClassSpec("fstype", scParameters)) + storageclass, err := client.StorageV1().StorageClasses().Create(getVSphereStorageClassSpec("fstype", scParameters, nil)) Expect(err).NotTo(HaveOccurred()) defer client.StorageV1().StorageClasses().Delete(storageclass.Name, nil) diff --git a/test/e2e/storage/vsphere/vsphere_volume_node_poweroff.go b/test/e2e/storage/vsphere/vsphere_volume_node_poweroff.go index a79699b6f097c..6e0b7aa0b49f4 100644 --- a/test/e2e/storage/vsphere/vsphere_volume_node_poweroff.go +++ b/test/e2e/storage/vsphere/vsphere_volume_node_poweroff.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2019 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -75,7 +75,7 @@ var _ = utils.SIGDescribe("Node Poweroff [Feature:vsphere] [Slow] [Disruptive]", */ It("verify volume status after node power off", func() { By("Creating a Storage Class") - storageClassSpec := getVSphereStorageClassSpec("test-sc", nil) + storageClassSpec := getVSphereStorageClassSpec("test-sc", nil, nil) storageclass, err := client.StorageV1().StorageClasses().Create(storageClassSpec) Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("Failed to create storage class with err: %v", err)) defer client.StorageV1().StorageClasses().Delete(storageclass.Name, nil) diff --git a/test/e2e/storage/vsphere/vsphere_volume_ops_storm.go b/test/e2e/storage/vsphere/vsphere_volume_ops_storm.go index b4d9d8c4a3dd9..d0f89432beb65 100644 --- a/test/e2e/storage/vsphere/vsphere_volume_ops_storm.go +++ b/test/e2e/storage/vsphere/vsphere_volume_ops_storm.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2019 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -87,7 +87,7 @@ var _ = utils.SIGDescribe("Volume Operations Storm [Feature:vsphere]", func() { By("Creating Storage Class") scParameters := make(map[string]string) scParameters["diskformat"] = "thin" - storageclass, err = client.StorageV1().StorageClasses().Create(getVSphereStorageClassSpec("thinsc", scParameters)) + storageclass, err = client.StorageV1().StorageClasses().Create(getVSphereStorageClassSpec("thinsc", scParameters, nil)) Expect(err).NotTo(HaveOccurred()) By("Creating PVCs using the Storage Class") diff --git a/test/e2e/storage/vsphere/vsphere_volume_perf.go b/test/e2e/storage/vsphere/vsphere_volume_perf.go index b4fe50ba93646..c5ae24a28e6ab 100644 --- a/test/e2e/storage/vsphere/vsphere_volume_perf.go +++ b/test/e2e/storage/vsphere/vsphere_volume_perf.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2019 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
@@ -128,22 +128,22 @@ func getTestStorageClasses(client clientset.Interface, policyName, datastoreName var err error switch scname { case storageclass1: - sc, err = client.StorageV1().StorageClasses().Create(getVSphereStorageClassSpec(storageclass1, nil)) + sc, err = client.StorageV1().StorageClasses().Create(getVSphereStorageClassSpec(storageclass1, nil, nil)) case storageclass2: var scVSanParameters map[string]string scVSanParameters = make(map[string]string) scVSanParameters[Policy_HostFailuresToTolerate] = "1" - sc, err = client.StorageV1().StorageClasses().Create(getVSphereStorageClassSpec(storageclass2, scVSanParameters)) + sc, err = client.StorageV1().StorageClasses().Create(getVSphereStorageClassSpec(storageclass2, scVSanParameters, nil)) case storageclass3: var scSPBMPolicyParameters map[string]string scSPBMPolicyParameters = make(map[string]string) scSPBMPolicyParameters[SpbmStoragePolicy] = policyName - sc, err = client.StorageV1().StorageClasses().Create(getVSphereStorageClassSpec(storageclass3, scSPBMPolicyParameters)) + sc, err = client.StorageV1().StorageClasses().Create(getVSphereStorageClassSpec(storageclass3, scSPBMPolicyParameters, nil)) case storageclass4: var scWithDSParameters map[string]string scWithDSParameters = make(map[string]string) scWithDSParameters[Datastore] = datastoreName - scWithDatastoreSpec := getVSphereStorageClassSpec(storageclass4, scWithDSParameters) + scWithDatastoreSpec := getVSphereStorageClassSpec(storageclass4, scWithDSParameters, nil) sc, err = client.StorageV1().StorageClasses().Create(scWithDatastoreSpec) } Expect(sc).NotTo(BeNil()) diff --git a/test/e2e/storage/vsphere/vsphere_volume_vsan_policy.go b/test/e2e/storage/vsphere/vsphere_volume_vsan_policy.go index 1f74978fc1683..87185437c92dd 100644 --- a/test/e2e/storage/vsphere/vsphere_volume_vsan_policy.go +++ b/test/e2e/storage/vsphere/vsphere_volume_vsan_policy.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2019 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
@@ -273,7 +273,7 @@ var _ = utils.SIGDescribe("Storage Policy Based Volume Provisioning [Feature:vsp func invokeValidPolicyTest(f *framework.Framework, client clientset.Interface, namespace string, scParameters map[string]string) { By("Creating Storage Class With storage policy params") - storageclass, err := client.StorageV1().StorageClasses().Create(getVSphereStorageClassSpec("storagepolicysc", scParameters)) + storageclass, err := client.StorageV1().StorageClasses().Create(getVSphereStorageClassSpec("storagepolicysc", scParameters, nil)) Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("Failed to create storage class with err: %v", err)) defer client.StorageV1().StorageClasses().Delete(storageclass.Name, nil) @@ -305,7 +305,7 @@ func invokeValidPolicyTest(f *framework.Framework, client clientset.Interface, n func invokeInvalidPolicyTestNeg(client clientset.Interface, namespace string, scParameters map[string]string) error { By("Creating Storage Class With storage policy params") - storageclass, err := client.StorageV1().StorageClasses().Create(getVSphereStorageClassSpec("storagepolicysc", scParameters)) + storageclass, err := client.StorageV1().StorageClasses().Create(getVSphereStorageClassSpec("storagepolicysc", scParameters, nil)) Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("Failed to create storage class with err: %v", err)) defer client.StorageV1().StorageClasses().Delete(storageclass.Name, nil) @@ -324,7 +324,7 @@ func invokeInvalidPolicyTestNeg(client clientset.Interface, namespace string, sc func invokeStaleDummyVMTestWithStoragePolicy(client clientset.Interface, masterNode string, namespace string, clusterName string, scParameters map[string]string) { By("Creating Storage Class With storage policy params") - storageclass, err := client.StorageV1().StorageClasses().Create(getVSphereStorageClassSpec("storagepolicysc", scParameters)) + storageclass, err := client.StorageV1().StorageClasses().Create(getVSphereStorageClassSpec("storagepolicysc", scParameters, nil)) Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("Failed to create storage class with err: %v", err)) defer client.StorageV1().StorageClasses().Delete(storageclass.Name, nil) diff --git a/test/e2e/storage/vsphere/vsphere_zone_support.go b/test/e2e/storage/vsphere/vsphere_zone_support.go new file mode 100644 index 0000000000000..a7334d99c08c7 --- /dev/null +++ b/test/e2e/storage/vsphere/vsphere_zone_support.go @@ -0,0 +1,358 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package vsphere + +import ( + "fmt" + "strings" + "time" + + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + + "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + clientset "k8s.io/client-go/kubernetes" + "k8s.io/kubernetes/test/e2e/framework" + "k8s.io/kubernetes/test/e2e/storage/utils" +) + +const ( + VsanDatastore1 = "vsanDatastore" + VsanDatastore2 = "vsanDatastore (1)" + CompatPolicy = "compatpolicy" + NonCompatPolicy = "noncompatpolicy" + ZoneA = "zone-a" + ZoneB = "zone-b" + ZoneC = "zone-c" + ZoneD = "zone-d" +) + +/* + Test to verify multi-zone support for dynamic volume provisioning in kubernetes. + The test environment is illustrated below: + + datacenter + --->cluster-vsan-1 (zone-a) -------------------- + --->host-1 : master | | + --->host-2 : node1 | vsanDatastore | + --->host-3 : node2 |____________________| + + + --->cluster-vsan-2 (zone-b) -------------------- + --->host-4 : node3 | | + --->host-5 : node4 | vsanDatastore (1) | + --->host-6 |____________________| + + + --->cluster-3 (zone-c) + --->host-7 : node5 ----------------- + | localDatastore | + | | + ----------------- + + --->host-8 (zone-c) : node6 -------------------- + | localDatastore (1) | + | | + -------------------- + Testbed description : + 1. cluster-vsan-1 is tagged with zone-a. So, vsanDatastore inherits zone-a since all the hosts under zone-a have vsanDatastore mounted on it. + 2. cluster-vsan-2 is tagged with zone-b. So, vsanDatastore (1) inherits zone-b since all the hosts under zone-b have vsanDatastore (1) mounted on it. + 3. cluster-3 is tagged with zone-c. cluster-3 only contains host-7. + 4. host-8 is not under any cluster and is tagged with zone-c. + 5. Since there are no shared datastores between host-7 under cluster-3 and host-8, no datastores in the environment inherit zone-c. + 6. The six worker nodes are ditributed among the hosts as shown in the above illustration + 7. Two storage policies are created on VC. One is a VSAN storage policy named as compatpolicy with hostFailuresToTolerate capability set to 1. + Second is a VSAN storage policy named as noncompatpolicy with hostFailuresToTolerate capability set to 4. + + Testsuite description : + 1. Tests to verify that zone labels are set correctly on a dynamically created PV. + 2. Tests to verify dynamic pv creation fails if availability zones are not specified or if there are no shared datastores under the specified zones. + 3. Tests to verify dynamic pv creation using availability zones works in combination with other storage class parameters such as storage policy, + datastore and VSAN capabilities. + 4. Tests to verify dynamic pv creation using availability zones fails in combination with other storage class parameters such as storage policy, + datastore and VSAN capabilities specifications when any of the former mentioned parameters are incompatible with the rest. 
+*/ + +var _ = utils.SIGDescribe("Zone Support", func() { + f := framework.NewDefaultFramework("zone-support") + var ( + client clientset.Interface + namespace string + scParameters map[string]string + zones []string + ) + BeforeEach(func() { + framework.SkipUnlessProviderIs("vsphere") + Bootstrap(f) + client = f.ClientSet + namespace = f.Namespace.Name + scParameters = make(map[string]string) + zones = make([]string, 0) + nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet) + if !(len(nodeList.Items) > 0) { + framework.Failf("Unable to find ready and schedulable Node") + } + }) + + It("Verify dynamically created pv with allowed zones specified in storage class, shows the right zone information on its labels", func() { + By(fmt.Sprintf("Creating storage class with the following zones : %s", ZoneA)) + zones = append(zones, ZoneA) + storageclass, err := client.StorageV1().StorageClasses().Create(getVSphereStorageClassSpec("zone-sc", nil, zones)) + Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("Failed to create storage class with err: %v", err)) + defer client.StorageV1().StorageClasses().Delete(storageclass.Name, nil) + + By("Creating PVC using the storage class") + pvclaim, err := framework.CreatePVC(client, namespace, getVSphereClaimSpecWithStorageClass(namespace, "2Gi", storageclass)) + Expect(err).NotTo(HaveOccurred()) + defer framework.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace) + + var pvclaims []*v1.PersistentVolumeClaim + pvclaims = append(pvclaims, pvclaim) + By("Waiting for claim to be in bound phase") + persistentvolumes, err := framework.WaitForPVClaimBoundPhase(client, pvclaims, framework.ClaimProvisionTimeout) + Expect(err).NotTo(HaveOccurred()) + + By("Verify zone information is present in the volume labels") + for _, pv := range persistentvolumes { + Expect(pv.ObjectMeta.Labels["failure-domain.beta.kubernetes.io/zone"]).To(Equal(ZoneA), "Incorrect or missing zone labels in pv.") + } + }) + + It("Verify PVC creation with invalid zone specified in storage class fails", func() { + By(fmt.Sprintf("Creating storage class with unknown zone : %s", ZoneD)) + zones = append(zones, ZoneD) + err := verifyPVCCreationFails(client, namespace, nil, zones) + Expect(err).To(HaveOccurred()) + errorMsg := "Failed to find a shared datastore matching zone [" + ZoneD + "]" + if !strings.Contains(err.Error(), errorMsg) { + Expect(err).NotTo(HaveOccurred(), errorMsg) + } + }) + + It("Verify a pod is created and attached to a dynamically created PV, based on allowed zones specified in storage class ", func() { + By(fmt.Sprintf("Creating storage class with zones :%s", ZoneA)) + zones = append(zones, ZoneA) + verifyPVCAndPodCreationSucceeds(client, namespace, nil, zones) + }) + + It("Verify a pod is created and attached to a dynamically created PV, based on the allowed zones and datastore specified in storage class", func() { + By(fmt.Sprintf("Creating storage class with zone :%s and datastore :%s", ZoneA, VsanDatastore1)) + scParameters[Datastore] = VsanDatastore1 + zones = append(zones, ZoneA) + verifyPVCAndPodCreationSucceeds(client, namespace, scParameters, zones) + }) + + It("Verify PVC creation with incompatible datastore and zone combination specified in storage class fails", func() { + By(fmt.Sprintf("Creating storage class with zone :%s and datastore :%s", ZoneC, VsanDatastore1)) + scParameters[Datastore] = VsanDatastore1 + zones = append(zones, ZoneC) + err := verifyPVCCreationFails(client, namespace, scParameters, zones) + errorMsg := "The specified datastore " + 
scParameters[Datastore] + " is not a shared datastore across node VMs or does not match the provided availability zones : [" + ZoneC + "]" + if !strings.Contains(err.Error(), errorMsg) { + Expect(err).NotTo(HaveOccurred(), errorMsg) + } + }) + + It("Verify a pod is created and attached to a dynamically created PV, based on the allowed zones and storage policy specified in storage class", func() { + By(fmt.Sprintf("Creating storage class with zone :%s and storage policy :%s", ZoneA, CompatPolicy)) + scParameters[SpbmStoragePolicy] = CompatPolicy + zones = append(zones, ZoneA) + verifyPVCAndPodCreationSucceeds(client, namespace, scParameters, zones) + }) + + It("Verify PVC creation with incompatible storagePolicy and zone combination specified in storage class fails", func() { + By(fmt.Sprintf("Creating storage class with zone :%s and storage policy :%s", ZoneA, NonCompatPolicy)) + scParameters[SpbmStoragePolicy] = NonCompatPolicy + zones = append(zones, ZoneA) + err := verifyPVCCreationFails(client, namespace, scParameters, zones) + errorMsg := "No compatible datastores found that satisfy the storage policy requirements" + if !strings.Contains(err.Error(), errorMsg) { + Expect(err).NotTo(HaveOccurred(), errorMsg) + } + }) + + It("Verify a pod is created and attached to a dynamically created PV, based on the allowed zones, datastore and storage policy specified in storage class", func() { + By(fmt.Sprintf("Creating storage class with zone :%s datastore :%s and storagePolicy :%s", ZoneA, VsanDatastore1, CompatPolicy)) + scParameters[SpbmStoragePolicy] = CompatPolicy + scParameters[Datastore] = VsanDatastore1 + zones = append(zones, ZoneA) + verifyPVCAndPodCreationSucceeds(client, namespace, scParameters, zones) + }) + + It("Verify PVC creation with incompatible storage policy along with compatible zone and datastore combination specified in storage class fails", func() { + By(fmt.Sprintf("Creating storage class with zone :%s datastore :%s and storagePolicy :%s", ZoneA, VsanDatastore1, NonCompatPolicy)) + scParameters[SpbmStoragePolicy] = NonCompatPolicy + scParameters[Datastore] = VsanDatastore1 + zones = append(zones, ZoneA) + err := verifyPVCCreationFails(client, namespace, scParameters, zones) + errorMsg := "User specified datastore is not compatible with the storagePolicy: \\\"" + NonCompatPolicy + "\\\"." 
+ if !strings.Contains(err.Error(), errorMsg) { + Expect(err).NotTo(HaveOccurred(), errorMsg) + } + }) + + It("Verify PVC creation with incompatible zone along with compatible storagePolicy and datastore combination specified in storage class fails", func() { + By(fmt.Sprintf("Creating storage class with zone :%s datastore :%s and storagePolicy :%s", ZoneC, VsanDatastore2, CompatPolicy)) + scParameters[SpbmStoragePolicy] = CompatPolicy + scParameters[Datastore] = VsanDatastore2 + zones = append(zones, ZoneC) + err := verifyPVCCreationFails(client, namespace, scParameters, zones) + errorMsg := "The specified datastore " + scParameters[Datastore] + " does not match the provided availability zones : [" + ZoneC + "]" + if !strings.Contains(err.Error(), errorMsg) { + Expect(err).NotTo(HaveOccurred(), errorMsg) + } + }) + + It("Verify PVC creation fails if no zones are specified in the storage class (No shared datastores exist among all the nodes)", func() { + By(fmt.Sprintf("Creating storage class with no zones")) + err := verifyPVCCreationFails(client, namespace, nil, nil) + errorMsg := "No shared datastores found in the Kubernetes cluster" + if !strings.Contains(err.Error(), errorMsg) { + Expect(err).NotTo(HaveOccurred(), errorMsg) + } + }) + + It("Verify PVC creation fails if only datastore is specified in the storage class (No shared datastores exist among all the nodes)", func() { + By(fmt.Sprintf("Creating storage class with datastore :%s", VsanDatastore1)) + scParameters[Datastore] = VsanDatastore1 + err := verifyPVCCreationFails(client, namespace, scParameters, nil) + errorMsg := "No shared datastores found in the Kubernetes cluster" + if !strings.Contains(err.Error(), errorMsg) { + Expect(err).NotTo(HaveOccurred(), errorMsg) + } + }) + + It("Verify PVC creation fails if only storage policy is specified in the storage class (No shared datastores exist among all the nodes)", func() { + By(fmt.Sprintf("Creating storage class with storage policy :%s", CompatPolicy)) + scParameters[SpbmStoragePolicy] = CompatPolicy + err := verifyPVCCreationFails(client, namespace, scParameters, nil) + errorMsg := "No shared datastores found in the Kubernetes cluster" + if !strings.Contains(err.Error(), errorMsg) { + Expect(err).NotTo(HaveOccurred(), errorMsg) + } + }) + + It("Verify PVC creation with compatible policy and datastore without any zones specified in the storage class fails (No shared datastores exist among all the nodes)", func() { + By(fmt.Sprintf("Creating storage class with storage policy :%s and datastore :%s", CompatPolicy, VsanDatastore1)) + scParameters[SpbmStoragePolicy] = CompatPolicy + scParameters[Datastore] = VsanDatastore1 + err := verifyPVCCreationFails(client, namespace, scParameters, nil) + errorMsg := "No shared datastores found in the Kubernetes cluster" + if !strings.Contains(err.Error(), errorMsg) { + Expect(err).NotTo(HaveOccurred(), errorMsg) + } + }) + + It("Verify PVC creation fails if the availability zone specified in the storage class have no shared datastores under it.", func() { + By(fmt.Sprintf("Creating storage class with zone :%s", ZoneC)) + zones = append(zones, ZoneC) + err := verifyPVCCreationFails(client, namespace, nil, zones) + errorMsg := "Failed to find a shared datastore matching zone [" + ZoneC + "]" + if !strings.Contains(err.Error(), errorMsg) { + Expect(err).NotTo(HaveOccurred(), errorMsg) + } + }) + + It("Verify a pod is created and attached to a dynamically created PV, based on multiple zones specified in the storage class (One zone has shared 
datastores and the other does not)", func() {
+		By(fmt.Sprintf("Creating storage class with the following zones :%s and %s", ZoneA, ZoneC))
+		zones = append(zones, ZoneC)
+		zones = append(zones, ZoneA)
+		verifyPVCAndPodCreationSucceeds(client, namespace, nil, zones)
+	})
+
+	It("Verify a pod is created and attached to a dynamically created PV, based on the multiple zones specified in the storage class. (PV can be created on either of the zones specified)", func() {
+		By(fmt.Sprintf("Creating storage class with the following zones :%s and %s", ZoneA, ZoneB))
+		zones = append(zones, ZoneA)
+		zones = append(zones, ZoneB)
+		verifyPVCAndPodCreationSucceeds(client, namespace, nil, zones)
+	})
+
+	It("Verify PVC creation with an invalid VSAN capability along with a compatible zone combination specified in storage class fails", func() {
+		By(fmt.Sprintf("Creating storage class with %s :%s and zone :%s", Policy_HostFailuresToTolerate, HostFailuresToTolerateCapabilityInvalidVal, ZoneA))
+		scParameters[Policy_HostFailuresToTolerate] = HostFailuresToTolerateCapabilityInvalidVal
+		zones = append(zones, ZoneA)
+		err := verifyPVCCreationFails(client, namespace, scParameters, zones)
+		errorMsg := "Invalid value for " + Policy_HostFailuresToTolerate + "."
+		if !strings.Contains(err.Error(), errorMsg) {
+			Expect(err).NotTo(HaveOccurred(), errorMsg)
+		}
+	})
+
+	It("Verify a pod is created and attached to a dynamically created PV, based on a VSAN capability and compatible zone specified in storage class", func() {
+		By(fmt.Sprintf("Creating storage class with %s :%s, %s :%s and zone :%s", Policy_ObjectSpaceReservation, ObjectSpaceReservationCapabilityVal, Policy_IopsLimit, IopsLimitCapabilityVal, ZoneA))
+		scParameters[Policy_ObjectSpaceReservation] = ObjectSpaceReservationCapabilityVal
+		scParameters[Policy_IopsLimit] = IopsLimitCapabilityVal
+		zones = append(zones, ZoneA)
+		verifyPVCAndPodCreationSucceeds(client, namespace, scParameters, zones)
+	})
+})
+
+func verifyPVCAndPodCreationSucceeds(client clientset.Interface, namespace string, scParameters map[string]string, zones []string) {
+	storageclass, err := client.StorageV1().StorageClasses().Create(getVSphereStorageClassSpec("zone-sc", scParameters, zones))
+	Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("Failed to create storage class with err: %v", err))
+	defer client.StorageV1().StorageClasses().Delete(storageclass.Name, nil)
+
+	By("Creating PVC using the Storage Class")
+	pvclaim, err := framework.CreatePVC(client, namespace, getVSphereClaimSpecWithStorageClass(namespace, "2Gi", storageclass))
+	Expect(err).NotTo(HaveOccurred())
+	defer framework.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace)
+
+	var pvclaims []*v1.PersistentVolumeClaim
+	pvclaims = append(pvclaims, pvclaim)
+	By("Waiting for claim to be in bound phase")
+	persistentvolumes, err := framework.WaitForPVClaimBoundPhase(client, pvclaims, framework.ClaimProvisionTimeout)
+	Expect(err).NotTo(HaveOccurred())
+
+	By("Creating pod to attach PV to the node")
+	pod, err := framework.CreatePod(client, namespace, nil, pvclaims, false, "")
+	Expect(err).NotTo(HaveOccurred())
+
+	By("Verify persistent volume was created on the right zone")
+	verifyVolumeCreationOnRightZone(persistentvolumes, pod.Spec.NodeName, zones)
+
+	By("Verify the volume is accessible and available in the pod")
+	verifyVSphereVolumesAccessible(client, pod, persistentvolumes)
+
+	By("Deleting pod")
+	framework.DeletePodWithWait(f, client, pod)
+
+	By("Waiting for volumes to be detached from the node")
+	waitForVSphereDiskToDetach(persistentvolumes[0].Spec.VsphereVolume.VolumePath, pod.Spec.NodeName)
+}
+
+func verifyPVCCreationFails(client clientset.Interface, namespace string, scParameters map[string]string, zones []string) error {
+	storageclass, err := client.StorageV1().StorageClasses().Create(getVSphereStorageClassSpec("zone-sc", scParameters, zones))
+	Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("Failed to create storage class with err: %v", err))
+	defer client.StorageV1().StorageClasses().Delete(storageclass.Name, nil)
+
+	By("Creating PVC using the Storage Class")
+	pvclaim, err := framework.CreatePVC(client, namespace, getVSphereClaimSpecWithStorageClass(namespace, "2Gi", storageclass))
+	Expect(err).NotTo(HaveOccurred())
+	defer framework.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace)
+
+	var pvclaims []*v1.PersistentVolumeClaim
+	pvclaims = append(pvclaims, pvclaim)
+
+	By("Waiting for claim to be in bound phase")
+	err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, client, pvclaim.Namespace, pvclaim.Name, framework.Poll, 2*time.Minute)
+	Expect(err).To(HaveOccurred())
+
+	eventList, err := client.CoreV1().Events(pvclaim.Namespace).List(metav1.ListOptions{})
+	Expect(err).NotTo(HaveOccurred())
+	framework.Logf("Failure message: %+q", eventList.Items[0].Message)
+	return fmt.Errorf("Failure message: %+q", eventList.Items[0].Message)
+}
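
Reviewer note on the mechanism exercised above: the zone-aware tests restrict provisioning through the StorageClass AllowedTopologies field, keyed by the failure-domain.beta.kubernetes.io/zone label (the value of kubeletapis.LabelZoneFailureDomain). The standalone sketch below is not part of the patch; it only illustrates, under stated assumptions, the object shape that a zones-aware spec helper builds. The helper name newZoneRestrictedSC, the "zone-sc" class name, the "thin" diskformat parameter, and the zone values are illustrative; the provisioner string is assumed to be the in-tree vSphere plugin name.

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	storagev1 "k8s.io/api/storage/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// newZoneRestrictedSC is a hypothetical helper mirroring the shape of the
// StorageClass the e2e tests build when a zones list is supplied.
func newZoneRestrictedSC(name string, params map[string]string, zones []string) *storagev1.StorageClass {
	sc := &storagev1.StorageClass{
		ObjectMeta: metav1.ObjectMeta{Name: name},
		// Assumed provisioner name for the in-tree vSphere volume plugin.
		Provisioner: "kubernetes.io/vsphere-volume",
		Parameters:  params,
	}
	if len(zones) > 0 {
		sc.AllowedTopologies = []v1.TopologySelectorTerm{
			{
				MatchLabelExpressions: []v1.TopologySelectorLabelRequirement{
					{
						// Well-known zone label; kubeletapis.LabelZoneFailureDomain
						// resolves to this string.
						Key:    "failure-domain.beta.kubernetes.io/zone",
						Values: zones,
					},
				},
			},
		}
	}
	return sc
}

func main() {
	// Restrict provisioning to zone-a and zone-b, as in the multi-zone tests above.
	sc := newZoneRestrictedSC("zone-sc", map[string]string{"diskformat": "thin"}, []string{"zone-a", "zone-b"})
	fmt.Printf("%s allows topologies: %+v\n", sc.Name, sc.AllowedTopologies)
}

With such a class, the provisioner must pick a datastore shared by the nodes in the allowed zones, which is why the negative tests above expect messages like "Failed to find a shared datastore matching zone [...]" or "No shared datastores found in the Kubernetes cluster".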