
Add logs for storage pools reordering #10419

Open
wants to merge 4 commits into base: main
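This PR replaces String.format-based log statements with parameterized placeholders and promotes some trace/debug messages, as shown in the diff below. As a rough illustration of that pattern only (the class and method names here are hypothetical, not taken from the patch), a Log4j2 logger renders `{}` placeholders lazily, so the eager String.format call and the explicit isDebugEnabled()/isTraceEnabled() guard become unnecessary:

    import org.apache.logging.log4j.LogManager;
    import org.apache.logging.log4j.Logger;

    import java.util.List;

    // Hypothetical class, only to illustrate the logging pattern used throughout this PR.
    public class PoolLoggingSketch {
        private static final Logger logger = LogManager.getLogger(PoolLoggingSketch.class);

        void logReordering(List<String> pools) {
            // Before: the message is built eagerly even when DEBUG is disabled,
            // so callers tend to wrap it in an isDebugEnabled() guard.
            if (logger.isDebugEnabled()) {
                logger.debug(String.format("Reordering %d pools", pools.size()));
            }

            // After: the {} placeholders are substituted only when the level is enabled,
            // so the guard and String.format can be dropped.
            logger.debug("Reordering [{}] pools", pools.size());
        }
    }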
engine/storage/src/main/java/org/apache/cloudstack/storage/allocator/AbstractStoragePoolAllocator.java

@@ -145,10 +145,10 @@
storageType = "shared";
}

logger.debug(String.format(
"Filtering storage pools by capacity type [%s] as the first storage pool of the list, with name [%s] and ID [%s], is a [%s] storage.",
logger.info(
"Filtering storage pools by capacity type [{}] as the first storage pool of the list, with name [{}] and ID [{}], is a [{}] storage.",
capacityType, storagePool.getName(), storagePool.getUuid(), storageType
));
);

Pair<List<Long>, Map<Long, Double>> result = capacityDao.orderHostsByFreeCapacity(zoneId, clusterId, capacityType);
List<Long> poolIdsByCapacity = result.first();
@@ -185,7 +185,7 @@
Long clusterId = plan.getClusterId();

List<Long> poolIdsByVolCount = volumeDao.listPoolIdsByVolumeCount(dcId, podId, clusterId, account.getAccountId());
logger.debug(String.format("List of pools in ascending order of number of volumes for account [%s] is [%s].", account, poolIdsByVolCount));
logger.debug("List of pools in ascending order of number of volumes for account [{}] is [{}].", account, poolIdsByVolCount);

// now filter the given list of Pools by this ordered list
Map<Long, StoragePool> poolMap = new HashMap<>();
@@ -206,16 +206,11 @@

@Override
public List<StoragePool> reorderPools(List<StoragePool> pools, VirtualMachineProfile vmProfile, DeploymentPlan plan, DiskProfile dskCh) {
if (logger.isTraceEnabled()) {
logger.trace("reordering pools");
}
if (pools == null) {
logger.trace("There are no pools to reorder; returning null.");
logger.info("There are no pools to reorder.");

return null;
}
if (logger.isTraceEnabled()) {
logger.trace(String.format("reordering %d pools", pools.size()));
}
logger.info("Reordering [{}] pools", pools.size());

Account account = null;
if (vmProfile.getVirtualMachine() != null) {
account = vmProfile.getOwner();
@@ -224,9 +219,7 @@
pools = reorderStoragePoolsBasedOnAlgorithm(pools, plan, account);

if (vmProfile.getVirtualMachine() == null) {
if (logger.isTraceEnabled()) {
logger.trace("The VM is null, skipping pools reordering by disk provisioning type.");
}
logger.info("The VM is null, skipping pool reordering by disk provisioning type.");

return pools;
}

@@ -240,14 +233,10 @@

List<StoragePool> reorderStoragePoolsBasedOnAlgorithm(List<StoragePool> pools, DeploymentPlan plan, Account account) {
String volumeAllocationAlgorithm = VolumeOrchestrationService.VolumeAllocationAlgorithm.value();
logger.debug("Using volume allocation algorithm {} to reorder pools.", volumeAllocationAlgorithm);
logger.info("Using volume allocation algorithm {} to reorder pools.", volumeAllocationAlgorithm);
if (volumeAllocationAlgorithm.equals("random") || volumeAllocationAlgorithm.equals("userconcentratedpod_random") || (account == null)) {
reorderRandomPools(pools);
} else if (StringUtils.equalsAny(volumeAllocationAlgorithm, "userdispersing", "firstfitleastconsumed")) {
if (logger.isTraceEnabled()) {
logger.trace("Using reordering algorithm {}", volumeAllocationAlgorithm);
}

if (volumeAllocationAlgorithm.equals("userdispersing")) {
pools = reorderPoolsByNumberOfVolumes(plan, pools, account);
} else {
@@ -259,16 +248,15 @@

void reorderRandomPools(List<StoragePool> pools) {
StorageUtil.traceLogStoragePools(pools, logger, "pools to choose from: ");
if (logger.isTraceEnabled()) {
logger.trace("Shuffle this so that we don't check the pools in the same order. Algorithm == 'random' (or no account?)");
}
StorageUtil.traceLogStoragePools(pools, logger, "pools to shuffle: ");
logger.trace("Shuffle this so that we don't check the pools in the same order. Algorithm == 'random' (or no account?)");
logger.debug("Pools to shuffle: [{}]", pools);
Collections.shuffle(pools, secureRandom);
StorageUtil.traceLogStoragePools(pools, logger, "shuffled list of pools to choose from: ");
logger.debug("Shuffled list of pools to choose from: [{}]", pools);
}

private List<StoragePool> reorderPoolsByDiskProvisioningType(List<StoragePool> pools, DiskProfile diskProfile) {
if (diskProfile != null && diskProfile.getProvisioningType() != null && !diskProfile.getProvisioningType().equals(Storage.ProvisioningType.THIN)) {
logger.info("Reordering [{}] pools by disk provisioning type [{}].", pools.size(), diskProfile.getProvisioningType());

List<StoragePool> reorderedPools = new ArrayList<>();
int preferredIndex = 0;
for (StoragePool pool : pools) {
@@ -282,22 +270,28 @@
reorderedPools.add(preferredIndex++, pool);
}
}
logger.debug("Reordered list of pools by disk provisioning type [{}]: [{}]", diskProfile.getProvisioningType(), pools);

return reorderedPools;
} else {
if (diskProfile == null) {
logger.info("Reordering pools by disk provisioning type wasn't necessary, since no disk profile was found.");

} else {
logger.debug("Reordering pools by disk provisioning type wasn't necessary, since the provisioning type is [{}].", diskProfile.getProvisioningType());

}
return pools;
}
}

protected boolean filter(ExcludeList avoid, StoragePool pool, DiskProfile dskCh, DeploymentPlan plan) {
logger.debug(String.format("Checking if storage pool [%s] is suitable to disk [%s].", pool, dskCh));
logger.debug("Checking if storage pool [{}] is suitable to disk [{}].", pool, dskCh);

if (avoid.shouldAvoid(pool)) {
logger.debug(String.format("StoragePool [%s] is in avoid set, skipping this pool to allocation of disk [%s].", pool, dskCh));
logger.debug("StoragePool [{}] is in avoid set, skipping this pool to allocation of disk [{}].", pool, dskCh);

return false;
}

if (dskCh.requiresEncryption() && !pool.getPoolType().supportsEncryption()) {
if (logger.isDebugEnabled()) {
logger.debug(String.format("Storage pool type '%s' doesn't support encryption required for volume, skipping this pool", pool.getPoolType()));
logger.debug("Storage pool type '[{}]' doesn't support encryption required for volume, skipping this pool", pool.getPoolType());

}
return false;
}
@@ -319,8 +313,8 @@
}

if (!checkDiskProvisioningSupport(dskCh, pool)) {
logger.debug(String.format("Storage pool [%s] does not have support to disk provisioning of disk [%s].", pool, ReflectionToStringBuilderUtils.reflectOnlySelectedFields(dskCh,
"type", "name", "diskOfferingId", "templateId", "volumeId", "provisioningType", "hyperType")));
logger.debug("Storage pool [{}] does not have support to disk provisioning of disk [{}].", pool, ReflectionToStringBuilderUtils.reflectOnlySelectedFields(dskCh,

"type", "name", "diskOfferingId", "templateId", "volumeId", "provisioningType", "hyperType"));
return false;
}

@@ -332,7 +326,7 @@
HostVO plannedHost = hostDao.findById(plan.getHostId());
if (!storageMgr.checkIfHostAndStoragePoolHasCommonStorageAccessGroups(plannedHost, pool)) {
if (logger.isDebugEnabled()) {
logger.debug(String.format("StoragePool %s and host %s does not have matching storage access groups", pool, plannedHost));
logger.debug("StoragePool [{}] and host [{}] does not have matching storage access groups", pool, plannedHost);

}
return false;
}
@@ -343,13 +337,13 @@
if (!isTempVolume) {
volume = volumeDao.findById(dskCh.getVolumeId());
if (!storageMgr.storagePoolCompatibleWithVolumePool(pool, volume)) {
logger.debug(String.format("Pool [%s] is not compatible with volume [%s], skipping it.", pool, volume));
logger.debug("Pool [{}] is not compatible with volume [{}], skipping it.", pool, volume);

return false;
}
}

if (pool.isManaged() && !storageUtil.managedStoragePoolCanScale(pool, plan.getClusterId(), plan.getHostId())) {
logger.debug(String.format("Cannot allocate pool [%s] to volume [%s] because the max number of managed clustered filesystems has been exceeded.", pool, volume));
logger.debug("Cannot allocate pool [{}] to volume [{}] because the max number of managed clustered filesystems has been exceeded.", pool, volume);

return false;
}

@@ -358,13 +352,13 @@
requestVolumeDiskProfilePairs.add(new Pair<>(volume, dskCh));
if (dskCh.getHypervisorType() == HypervisorType.VMware) {
if (pool.getPoolType() == Storage.StoragePoolType.DatastoreCluster && storageMgr.isStoragePoolDatastoreClusterParent(pool)) {
logger.debug(String.format("Skipping allocation of pool [%s] to volume [%s] because this pool is a parent datastore cluster.", pool, volume));
logger.debug("Skipping allocation of pool [{}] to volume [{}] because this pool is a parent datastore cluster.", pool, volume);

return false;
}
if (pool.getParent() != 0L) {
StoragePoolVO datastoreCluster = storagePoolDao.findById(pool.getParent());
if (datastoreCluster == null || (datastoreCluster != null && datastoreCluster.getStatus() != StoragePoolStatus.Up)) {
logger.debug(String.format("Skipping allocation of pool [%s] to volume [%s] because this pool is not in [%s] state.", datastoreCluster, volume, StoragePoolStatus.Up));
logger.debug("Skipping allocation of pool [{}] to volume [{}] because this pool is not in [{}] state.", datastoreCluster, volume, StoragePoolStatus.Up);

return false;
}
}
@@ -374,11 +368,11 @@
storageMgr.isStoragePoolCompliantWithStoragePolicy(dskCh.getDiskOfferingId(), pool) :
storageMgr.isStoragePoolCompliantWithStoragePolicy(requestVolumeDiskProfilePairs, pool);
if (!isStoragePoolStoragePolicyCompliance) {
logger.debug(String.format("Skipping allocation of pool [%s] to volume [%s] because this pool is not compliant with the storage policy required by the volume.", pool, volume));
logger.debug("Skipping allocation of pool [{}] to volume [{}] because this pool is not compliant with the storage policy required by the volume.", pool, volume);

return false;
}
} catch (StorageUnavailableException e) {
logger.warn(String.format("Could not verify storage policy complaince against storage pool %s due to exception %s", pool.getUuid(), e.getMessage()));
logger.warn("Could not verify storage policy compliance against storage pool [{}] due to exception [{}]", pool.getUuid(), e.getMessage());

return false;
}
}
@@ -427,19 +421,19 @@
protected void logDisabledStoragePools(long dcId, Long podId, Long clusterId, ScopeType scope) {
List<StoragePoolVO> disabledPools = storagePoolDao.findDisabledPoolsByScope(dcId, podId, clusterId, scope);
if (disabledPools != null && !disabledPools.isEmpty()) {
logger.trace(String.format("Ignoring pools [%s] as they are in disabled state.", ReflectionToStringBuilderUtils.reflectOnlySelectedFields(disabledPools)));
logger.trace("Ignoring pools [{}] as they are in disabled state.", ReflectionToStringBuilderUtils.reflectOnlySelectedFields(disabledPools));

}
}

protected void logStartOfSearch(DiskProfile dskCh, VirtualMachineProfile vmProfile, DeploymentPlan plan, int returnUpTo,
boolean bypassStorageTypeCheck){
logger.trace(String.format("%s is looking for storage pools that match the VM's disk profile [%s], virtual machine profile [%s] and "
+ "deployment plan [%s]. Returning up to [%d] and bypassStorageTypeCheck [%s].", this.getClass().getSimpleName(), dskCh, vmProfile, plan, returnUpTo, bypassStorageTypeCheck));
logger.trace("[{}] is looking for storage pools that match the VM's disk profile [{}], virtual machine profile [{}] and "
+ "deployment plan [{}]. Returning up to [{}] and bypassStorageTypeCheck [{}].", this.getClass().getSimpleName(), dskCh, vmProfile, plan, returnUpTo, bypassStorageTypeCheck);

}

protected void logEndOfSearch(List<StoragePool> storagePoolList) {
logger.debug(String.format("%s is returning [%s] suitable storage pools [%s].", this.getClass().getSimpleName(), storagePoolList.size(),
Arrays.toString(storagePoolList.toArray())));
logger.debug("[{}] is returning [{}] suitable storage pools [{}].", this.getClass().getSimpleName(), storagePoolList.size(),
Arrays.toString(storagePoolList.toArray()));

}

}