Why did you add only imports in this class?

@@ -26,8 +26,10 @@
import org.springframework.web.bind.annotation.RequestHeader;
import org.springframework.web.bind.annotation.RequestMapping;
import org.springframework.web.bind.annotation.RequestMethod;
import org.springframework.web.bind.annotation.RequestParam;

import java.net.URI;
import java.util.Map;

@FeignClient(name = "SvmClient", url = "https://{clusterIP}/api/svm/svms", configuration = FeignConfiguration.class)
public interface SvmFeignClient {
@@ -30,21 +30,23 @@
import org.springframework.web.bind.annotation.RequestBody;
import org.springframework.web.bind.annotation.RequestMethod;

import java.net.URI;


@Lazy
@FeignClient(name = "VolumeClient", url = "https://{clusterIP}/api/storage/volumes", configuration = FeignConfiguration.class)
public interface VolumeFeignClient {

@RequestMapping(method = RequestMethod.DELETE, value="/{uuid}")
void deleteVolume(@RequestHeader("Authorization") String authHeader, @PathVariable("uuid") String uuid);
void deleteVolume(URI baseURL, @RequestHeader("Authorization") String authHeader, @PathVariable("uuid") String uuid);

With this change, why do we still need the url parameter in the class-level annotation?

Collaborator Author:

If we don't pass it explicitly, we need to set a property statically in application.properties or somewhere similar, which in our case is not feasible. Also, the user might ask us to deal with multiple filers, so the IP wouldn't be static.
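
A minimal sketch of the dynamic-URL pattern described above, assuming Spring Cloud OpenFeign: when a Feign method's first parameter is a java.net.URI, that URI becomes the request target for that single call, overriding the url attribute on @FeignClient. The addresses and variable names below are illustrative only.

// Hypothetical caller, not part of this PR: one client addressing two filers.
URI filerA = URI.create("https://10.0.0.5/api/storage/volumes");
URI filerB = URI.create("https://10.0.0.6/api/storage/volumes");
JobResponse job = volumeFeignClient.createVolumeWithJob(filerA, authHeader, volumeRequest);
Volume volume = volumeFeignClient.getVolumeByUUID(filerB, authHeader, "some-volume-uuid");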


@RequestMapping(method = RequestMethod.POST)
JobResponse createVolumeWithJob(@RequestHeader("Authorization") String authHeader, @RequestBody Volume volumeRequest);
JobResponse createVolumeWithJob(URI baseURL, @RequestHeader("Authorization") String authHeader, @RequestBody Volume volumeRequest);

@RequestMapping(method = RequestMethod.GET, value="/{uuid}")
Volume getVolumeByUUID(@RequestHeader("Authorization") String authHeader, @PathVariable("uuid") String uuid);
Volume getVolumeByUUID(URI baseURL, @RequestHeader("Authorization") String authHeader, @PathVariable("uuid") String uuid);

@RequestMapping(method = RequestMethod.PATCH)
JobResponse updateVolumeRebalancing(@RequestHeader("accept") String acceptHeader, @PathVariable("uuid") String uuid, @RequestBody Volume volumeRequest);
JobResponse updateVolumeRebalancing(URI baseURL, @RequestHeader("accept") String acceptHeader, @PathVariable("uuid") String uuid, @RequestBody Volume volumeRequest);

}
@@ -0,0 +1,88 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/

package org.apache.cloudstack.storage.feign.model;

import org.apache.cloudstack.storage.utils.Constants.ProtocolType;

public class OntapStorage {
public static String _username;
public static String _password;
public static String _managementLIF;

Let's use camel casing for the variable names.
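
For instance, a hypothetical rename of the fields shown here:

public static String svmName;       // instead of _svmName
public static String managementLIF; // instead of _managementLIF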

public static String _svmName;
public static ProtocolType _protocolType;
public static Boolean _isDisaggregated;

public OntapStorage(String username, String password, String managementLIF, String svmName, ProtocolType protocolType, Boolean isDisaggregated) {
_username = username;
_password = password;
_managementLIF = managementLIF;
_svmName = svmName;
_protocolType = protocolType;
_isDisaggregated = isDisaggregated;
}

public String getUsername() {
return _username;
}

public void setUsername(String username) {
_username = username;
}

public String getPassword() {
return _password;
}

public void setPassword(String password) {
_password = password;
}

public String getManagementLIF() {
return _managementLIF;
}

public void setManagementLIF(String managementLIF) {
_managementLIF = managementLIF;
}

public String getSvmName() {
return _svmName;
}

public void setSvmName(String svmName) {
_svmName = svmName;
}

public ProtocolType getProtocol() {
return _protocolType;
}

public void setProtocol(ProtocolType protocolType) {
_protocolType = protocolType;
}

public Boolean getIsDisaggregated() {
return _isDisaggregated;
}

public void setIsDisaggregated(Boolean isDisaggregated) {
_isDisaggregated = isDisaggregated;
}
}

I am not seeing any significant changes here; better to remove this class from the PR.

Collaborator Author:

This came in as part of a rebase; it just has some whitespace changes. Sure, I will try to revert it.

@@ -24,7 +24,7 @@
import java.util.List;

/**
* OnTapResponse
* OntapResponse
*/
@JsonInclude(JsonInclude.Include.NON_NULL)
public class OntapResponse<T> {
@@ -59,4 +59,4 @@ public void setRecords(List<T> records) {
this.records = records;
this.numRecords = (records != null) ? records.size() : 0;
}
}
}
@@ -21,19 +21,43 @@


import com.cloud.agent.api.StoragePoolInfo;
import com.cloud.dc.ClusterVO;
import com.cloud.dc.dao.ClusterDao;
import com.cloud.host.HostVO;
import com.cloud.hypervisor.Hypervisor;
import com.cloud.resource.ResourceManager;
import com.cloud.storage.Storage;
import com.cloud.storage.StorageManager;
import com.cloud.storage.StoragePool;
import com.cloud.utils.exception.CloudRuntimeException;
import com.google.common.base.Preconditions;
import org.apache.cloudstack.engine.subsystem.api.storage.ClusterScope;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
import org.apache.cloudstack.engine.subsystem.api.storage.HostScope;
import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreInfo;
import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreLifeCycle;
import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreParameters;
import org.apache.cloudstack.engine.subsystem.api.storage.ZoneScope;
import org.apache.cloudstack.storage.datastore.lifecycle.BasePrimaryDataStoreLifeCycleImpl;
import org.apache.cloudstack.storage.feign.model.OntapStorage;
import org.apache.cloudstack.storage.provider.StorageProviderFactory;
import org.apache.cloudstack.storage.service.StorageStrategy;
import org.apache.cloudstack.storage.utils.Constants;
import org.apache.cloudstack.storage.utils.Constants.ProtocolType;
import org.apache.cloudstack.storage.volume.datastore.PrimaryDataStoreHelper;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import java.util.Map;

public class OntapPrimaryDatastoreLifecycle implements PrimaryDataStoreLifeCycle {
import javax.inject.Inject;
import java.util.List;
import java.util.Map;
import java.util.UUID;

public class OntapPrimaryDatastoreLifecycle extends BasePrimaryDataStoreLifeCycleImpl implements PrimaryDataStoreLifeCycle {
@Inject private ClusterDao _clusterDao;
@Inject private StorageManager _storageMgr;
@Inject private ResourceManager _resourceMgr;
@Inject private PrimaryDataStoreHelper _dataStoreHelper;
private static final Logger s_logger = (Logger)LogManager.getLogger(OntapPrimaryDatastoreLifecycle.class);

/**
@@ -43,14 +67,120 @@ public class OntapPrimaryDatastoreLifecycle implements PrimaryDataStoreLifeCycle
*/
@Override
public DataStore initialize(Map<String, Object> dsInfos) {

return null;

if (dsInfos == null) {
throw new CloudRuntimeException("Datastore info map is null, cannot create primary storage");
}
String url = dsInfos.get("url").toString(); // TODO: Decide whether the customer should enter just the Management LIF IP or https://ManagementLIF
Long zoneId = dsInfos.get("zoneId").toString().trim().isEmpty() ? null : (Long)dsInfos.get("zoneId");
Long podId = dsInfos.get("podId").toString().trim().isEmpty() ? null : (Long)dsInfos.get("podId");
Long clusterId = dsInfos.get("clusterId").toString().trim().isEmpty() ? null : (Long)dsInfos.get("clusterId");
String storagePoolName = dsInfos.get("name").toString().trim();
String providerName = dsInfos.get("providerName").toString().trim();
String tags = dsInfos.get("tags").toString().trim();
Boolean isTagARule = (Boolean) dsInfos.get("isTagARule");
String scheme = dsInfos.get("scheme").toString();

s_logger.info("Creating ONTAP primary storage pool with name: " + storagePoolName + ", provider: " + providerName +
", zoneId: " + zoneId + ", podId: " + podId + ", clusterId: " + clusterId + ", protocol: " + scheme);

// Additional details requested for ONTAP primary storage pool creation
@SuppressWarnings("unchecked")
Map<String, String> details = (Map<String, String>)dsInfos.get("details");
// Validations
if (podId == null ^ clusterId == null) {
throw new CloudRuntimeException("Cluster Id or Pod Id is null, cannot create primary storage");
}

if (podId == null && clusterId == null) {
if (zoneId != null) {
s_logger.info("Both Pod Id and Cluster Id are null, Primary storage pool will be associated with a Zone");
} else {
throw new CloudRuntimeException("Pod Id, Cluster Id and Zone Id are all null, cannot create primary storage");
}
}

if (storagePoolName == null || storagePoolName.isEmpty()) {

The empty checks on the string parameters must use the trim method as well (see the sketch after these checks).

throw new CloudRuntimeException("Storage pool name is null or empty, cannot create primary storage");
}

if (providerName == null || providerName.isEmpty()) {
throw new CloudRuntimeException("Provider name is null or empty, cannot create primary storage");
}
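
A minimal sketch of the guard the reviewer is asking for (hypothetical; relevant for any string parameter that is not already trimmed when read from dsInfos):

if (providerName == null || providerName.trim().isEmpty()) {
    throw new CloudRuntimeException("Provider name is null or empty, cannot create primary storage");
}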

PrimaryDataStoreParameters parameters = new PrimaryDataStoreParameters();
if (clusterId != null) {
@suryag1201 (Oct 22, 2025):

Why is this check required again? It is already checked above.

Collaborator Author:

The customer has the option to omit both the cluster and pod IDs when creating a zone-level primary storage pool.
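
In short, the scope validation earlier in this method resolves as follows (a summary written out for clarity, not text from the PR):

// podId and clusterId both set              -> cluster-scoped pool
// podId and clusterId both null, zoneId set -> zone-scoped pool
// exactly one of podId/clusterId set        -> rejected by the XOR check above
// podId, clusterId and zoneId all null      -> rejected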

ClusterVO clusterVO = _clusterDao.findById(clusterId);
Preconditions.checkNotNull(clusterVO, "Unable to locate the specified cluster");
if (clusterVO.getHypervisorType() != Hypervisor.HypervisorType.KVM) {
throw new CloudRuntimeException("ONTAP primary storage is supported only for KVM hypervisor");
}
parameters.setHypervisorType(clusterVO.getHypervisorType());
}

// TODO: While testing, check what this actually does and whether the fields corresponding to each protocol should also be set
// TODO: scheme could be 'custom' in our case, and we might have to ask the user for 'protocol' separately
ProtocolType protocol = ProtocolType.valueOf(details.get(Constants.PROTOCOL).toUpperCase());
switch (protocol) {
case NFS:
parameters.setType(Storage.StoragePoolType.NetworkFilesystem);
break;
case ISCSI:
parameters.setType(Storage.StoragePoolType.Iscsi);
break;
default:
throw new CloudRuntimeException("Unsupported protocol: " + scheme + ", cannot create primary storage");
}

details.put(Constants.MANAGEMENT_LIF, url);

// Validate the ONTAP details
if(details.get(Constants.IS_DISAGGREGATED) == null || details.get(Constants.IS_DISAGGREGATED).isEmpty()) {
details.put(Constants.IS_DISAGGREGATED, "false");
}
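
For illustration, a hypothetical details map that would reach this point (the literal key strings are defined in Constants and are not visible in this diff; the values are made up):

// details.put(Constants.PROTOCOL, "iscsi");         // parsed into ProtocolType above
// details.put(Constants.USERNAME, "admin");
// details.put(Constants.PASSWORD, "secret");
// details.put(Constants.SVM_NAME, "svm1");
// details.put(Constants.IS_DISAGGREGATED, "false"); // defaulted here when absent
// details.put("size", "1073741824");                // bytes assumed; see the size TODO below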

OntapStorage ontapStorage = new OntapStorage(details.get(Constants.USERNAME), details.get(Constants.PASSWORD),
details.get(Constants.MANAGEMENT_LIF), details.get(Constants.SVM_NAME), protocol,
Boolean.parseBoolean(details.get(Constants.IS_DISAGGREGATED)));
StorageStrategy storageStrategy = StorageProviderFactory.getStrategy(ontapStorage);
boolean isValid = storageStrategy.connect();
if (isValid) {
// String volumeName = storagePoolName + "_vol"; //TODO: Figure out a better naming convention
storageStrategy.createVolume(storagePoolName, Long.parseLong((details.get("size")))); // TODO: size should be in bytes, so see if conversion is needed
} else {
throw new CloudRuntimeException("ONTAP details validation failed, cannot create primary storage");
}

parameters.setTags(tags);
parameters.setIsTagARule(isTagARule);
parameters.setDetails(details);
parameters.setUuid(UUID.randomUUID().toString());
parameters.setZoneId(zoneId);
parameters.setPodId(podId);
parameters.setClusterId(clusterId);
parameters.setName(storagePoolName);
parameters.setProviderName(providerName);
parameters.setManaged(true);

return _dataStoreHelper.createPrimaryDataStore(parameters);
}

@Override
public boolean attachCluster(DataStore store, ClusterScope scope) {
return false;
public boolean attachCluster(DataStore dataStore, ClusterScope scope) {
logger.debug("In attachCluster for ONTAP primary storage");
PrimaryDataStoreInfo primarystore = (PrimaryDataStoreInfo)dataStore;
List<HostVO> hostsToConnect = _resourceMgr.getEligibleUpAndEnabledHostsInClusterForStorageConnection(primarystore);

logger.debug(String.format("Attaching the pool to each of the hosts %s in the cluster: %s", hostsToConnect, primarystore.getClusterId()));
for (HostVO host : hostsToConnect) {
// TODO: Fetch the host IQN and add to the initiator group on ONTAP cluster
try {
_storageMgr.connectHostToSharedPool(host, dataStore.getId());
} catch (Exception e) {
logger.warn("Unable to establish a connection between " + host + " and " + dataStore, e);
}
}
_dataStoreHelper.attachCluster(dataStore);
return true;
}

@Override
@@ -60,7 +190,20 @@ public boolean attachHost(DataStore store, HostScope scope, StoragePoolInfo exis

@Override
public boolean attachZone(DataStore dataStore, ZoneScope scope, Hypervisor.HypervisorType hypervisorType) {
return false;
logger.debug("In attachZone for ONTAP primary storage");
List<HostVO> hostsToConnect = _resourceMgr.getEligibleUpAndEnabledHostsInZoneForStorageConnection(dataStore, scope.getScopeId(), Hypervisor.HypervisorType.KVM);

logger.debug(String.format("In createPool. Attaching the pool to each of the hosts in %s.", hostsToConnect));
for (HostVO host : hostsToConnect) {
// TODO: Fetch the host IQN and add to the initiator group on ONTAP cluster
try {
_storageMgr.connectHostToSharedPool(host, dataStore.getId());
} catch (Exception e) {
logger.warn("Unable to establish a connection between " + host + " and " + dataStore, e);
}
}
_dataStoreHelper.attachZone(dataStore);
return true;
}

@Override