diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index aeb701bbcfbd..1c6c90a61837 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -33,7 +33,7 @@ jobs: - uses: actions/checkout@v4 - name: Set up JDK 11 - uses: actions/setup-java@v3 + uses: actions/setup-java@v4 with: java-version: '11' distribution: 'adopt' diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index c4d79843d3d7..c4196f06d4b3 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -211,7 +211,7 @@ jobs: fetch-depth: 0 - name: Set up JDK - uses: actions/setup-java@v3 + uses: actions/setup-java@v4 with: java-version: '11' distribution: 'adopt' diff --git a/.github/workflows/codecov.yml b/.github/workflows/codecov.yml index 430d62df8def..f7b28fdeecc5 100644 --- a/.github/workflows/codecov.yml +++ b/.github/workflows/codecov.yml @@ -37,7 +37,7 @@ jobs: fetch-depth: 0 - name: Set up JDK11 - uses: actions/setup-java@v3 + uses: actions/setup-java@v4 with: distribution: 'temurin' java-version: '11' diff --git a/.github/workflows/main-sonar-check.yml b/.github/workflows/main-sonar-check.yml index cc27309f8a53..66bb1093e040 100644 --- a/.github/workflows/main-sonar-check.yml +++ b/.github/workflows/main-sonar-check.yml @@ -37,7 +37,7 @@ jobs: fetch-depth: 0 - name: Set up JDK11 - uses: actions/setup-java@v3 + uses: actions/setup-java@v4 with: distribution: 'temurin' java-version: '11' diff --git a/.github/workflows/rat.yml b/.github/workflows/rat.yml index 64fa4c3da0ca..b8f83de81940 100644 --- a/.github/workflows/rat.yml +++ b/.github/workflows/rat.yml @@ -32,7 +32,7 @@ jobs: steps: - uses: actions/checkout@v4 - name: Set up JDK 11 - uses: actions/setup-java@v3 + uses: actions/setup-java@v4 with: java-version: '11' distribution: 'adopt' diff --git a/.github/workflows/sonar-check.yml b/.github/workflows/sonar-check.yml index a8282f251454..2ebcf1fb2db7 100644 --- a/.github/workflows/sonar-check.yml +++ b/.github/workflows/sonar-check.yml @@ -39,7 +39,7 @@ jobs: fetch-depth: 0 - name: Set up JDK11 - uses: actions/setup-java@v3 + uses: actions/setup-java@v4 with: distribution: 'temurin' java-version: '11' diff --git a/INSTALL.md b/INSTALL.md index 620fc1833a82..6586e4e57fc2 100644 --- a/INSTALL.md +++ b/INSTALL.md @@ -15,7 +15,7 @@ was tested against a CentOS 7 x86_64 setup. 
 Install tools and dependencies used for development:
 
-    # yum -y install git java-11-openjdk java-11-openjdk-devel \
+    # yum -y install git java-17-openjdk java-17-openjdk-devel \
       mysql mysql-server mkisofs git gcc python MySQL-python openssh-clients wget
 
 Set up Maven (3.6.0):
diff --git a/api/src/main/java/com/cloud/deploy/DeploymentPlanner.java b/api/src/main/java/com/cloud/deploy/DeploymentPlanner.java
index e9f706ac1cee..354f9cfaac53 100644
--- a/api/src/main/java/com/cloud/deploy/DeploymentPlanner.java
+++ b/api/src/main/java/com/cloud/deploy/DeploymentPlanner.java
@@ -21,8 +21,12 @@
 import java.util.HashSet;
 import java.util.Set;
 
+import org.apache.logging.log4j.Logger;
+import org.apache.logging.log4j.LogManager;
+
 import com.cloud.dc.DataCenter;
 import com.cloud.dc.Pod;
+import com.cloud.exception.CloudException;
 import com.cloud.exception.InsufficientCapacityException;
 import com.cloud.exception.InsufficientServerCapacityException;
 import com.cloud.exception.ResourceUnavailableException;
@@ -75,7 +79,7 @@ public enum PlannerResourceUsage {
     public static class ExcludeList implements Serializable {
         private static final long serialVersionUID = -482175549460148301L;
-
+        protected static Logger LOGGER = LogManager.getLogger(ExcludeList.class);
         private Set<Long> _dcIds;
         private Set<Long> _podIds;
         private Set<Long> _clusterIds;
@@ -104,13 +108,26 @@ public ExcludeList(Set<Long> dcIds, Set<Long> podIds, Set<Long> clusterIds, Set<Long> hostIds, Set<Long> poolIds) {
             }
         }
 
+        private void logAvoid(Class<?> scope, CloudException e) {
+            Long id = null;
+            if (e instanceof InsufficientCapacityException) {
+                id = ((InsufficientCapacityException) e).getId();
+            } else if (e instanceof ResourceUnavailableException) {
+                id = ((ResourceUnavailableException) e).getResourceId();
+            } else {
+                LOGGER.debug("Failed to log avoided component due to unexpected exception type [{}].", e.getMessage());
+                return;
+            }
+            LOGGER.debug("Adding {} [{}] to the avoid set due to [{}].", scope.getSimpleName(), id, e.getMessage());
+        }
+
         public boolean add(InsufficientCapacityException e) {
             Class<?> scope = e.getScope();
             if (scope == null) {
                 return false;
             }
-
+            logAvoid(scope, e);
             if (Host.class.isAssignableFrom(scope)) {
                 addHost(e.getId());
             } else if (Pod.class.isAssignableFrom(scope)) {
@@ -128,13 +145,14 @@ public boolean add(InsufficientCapacityException e) {
             return true;
         }
 
+
         public boolean add(ResourceUnavailableException e) {
             Class<?> scope = e.getScope();
             if (scope == null) {
                 return false;
             }
-
+            logAvoid(scope, e);
             if (Host.class.isAssignableFrom(scope)) {
                 addHost(e.getResourceId());
             } else if (Pod.class.isAssignableFrom(scope)) {
diff --git a/api/src/main/java/com/cloud/event/EventTypes.java b/api/src/main/java/com/cloud/event/EventTypes.java
index 5d5252290959..c4833d3433ae 100644
--- a/api/src/main/java/com/cloud/event/EventTypes.java
+++ b/api/src/main/java/com/cloud/event/EventTypes.java
@@ -303,6 +303,7 @@ public class EventTypes {
     public static final String EVENT_VOLUME_CREATE = "VOLUME.CREATE";
     public static final String EVENT_VOLUME_DELETE = "VOLUME.DELETE";
     public static final String EVENT_VOLUME_ATTACH = "VOLUME.ATTACH";
+    public static final String EVENT_VOLUME_CHECK = "VOLUME.CHECK";
     public static final String EVENT_VOLUME_DETACH = "VOLUME.DETACH";
     public static final String EVENT_VOLUME_EXTRACT = "VOLUME.EXTRACT";
     public static final String EVENT_VOLUME_UPLOAD = "VOLUME.UPLOAD";
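For orientation, a quick sketch of the log output the new ExcludeList.logAvoid helper above produces when a planner records a failure. This is illustration only, not part of the patch: the ids and messages are invented, the fragment assumes CloudStack's api module on the classpath, and the exception constructors are assumed to be the (message, scope, id) forms used elsewhere in CloudStack.

    // Illustration only; ids and messages below are made up.
    ExcludeList avoids = new ExcludeList();
    avoids.add(new InsufficientServerCapacityException("Not enough CPU", Host.class, 42L));
    // -> DEBUG  Adding Host [42] to the avoid set due to [Not enough CPU].
    avoids.add(new ResourceUnavailableException("Pod in maintenance", Pod.class, 7L));
    // -> DEBUG  Adding Pod [7] to the avoid set due to [Pod in maintenance].  (message text approximate)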
diff --git a/api/src/main/java/com/cloud/storage/VolumeApiService.java b/api/src/main/java/com/cloud/storage/VolumeApiService.java
index 8d5f7892f102..a673df12d0f4 100644
--- a/api/src/main/java/com/cloud/storage/VolumeApiService.java
+++ b/api/src/main/java/com/cloud/storage/VolumeApiService.java
@@ -22,9 +22,11 @@
 import java.util.List;
 import java.util.Map;
 
+import com.cloud.utils.Pair;
 import org.apache.cloudstack.api.command.user.volume.AssignVolumeCmd;
 import org.apache.cloudstack.api.command.user.volume.AttachVolumeCmd;
 import org.apache.cloudstack.api.command.user.volume.ChangeOfferingForVolumeCmd;
+import org.apache.cloudstack.api.command.user.volume.CheckAndRepairVolumeCmd;
 import org.apache.cloudstack.api.command.user.volume.CreateVolumeCmd;
 import org.apache.cloudstack.api.command.user.volume.DetachVolumeCmd;
 import org.apache.cloudstack.api.command.user.volume.ExtractVolumeCmd;
@@ -178,4 +180,6 @@ Snapshot takeSnapshot(Long volumeId, Long policyId, Long snapshotId, Account acc
     void publishVolumeCreationUsageEvent(Volume volume);
 
     boolean stateTransitTo(Volume vol, Volume.Event event) throws NoTransitionException;
+
+    Pair<String, String> checkAndRepairVolume(CheckAndRepairVolumeCmd cmd) throws ResourceAllocationException;
 }
diff --git a/api/src/main/java/org/apache/cloudstack/api/ApiConstants.java b/api/src/main/java/org/apache/cloudstack/api/ApiConstants.java
index 9f959db9262b..416072f1210a 100644
--- a/api/src/main/java/org/apache/cloudstack/api/ApiConstants.java
+++ b/api/src/main/java/org/apache/cloudstack/api/ApiConstants.java
@@ -380,6 +380,7 @@ public class ApiConstants {
     public static final String RECEIVED_BYTES = "receivedbytes";
     public static final String RECONNECT = "reconnect";
     public static final String RECOVER = "recover";
+    public static final String REPAIR = "repair";
     public static final String REQUIRES_HVM = "requireshvm";
     public static final String RESOURCE_COUNT = "resourcecount";
     public static final String RESOURCE_NAME = "resourcename";
@@ -506,6 +507,9 @@
     public static final String IS_VOLATILE = "isvolatile";
     public static final String VOLUME_ID = "volumeid";
     public static final String VOLUMES = "volumes";
+    public static final String VOLUME_CHECK_RESULT = "volumecheckresult";
+    public static final String VOLUME_REPAIR_RESULT = "volumerepairresult";
+
     public static final String ZONE = "zone";
     public static final String ZONE_ID = "zoneid";
     public static final String ZONE_NAME = "zonename";
diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/user/DeleteUserCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/user/DeleteUserCmd.java
index 560e449412c2..ddf21affb535 100644
--- a/api/src/main/java/org/apache/cloudstack/api/command/admin/user/DeleteUserCmd.java
+++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/user/DeleteUserCmd.java
@@ -84,11 +84,10 @@ public ApiCommandResourceType getApiResourceType() {
     public void execute() {
         CallContext.current().setEventDetails("UserId: " + getId());
         boolean result = _regionService.deleteUser(this);
-        if (result) {
-            SuccessResponse response = new SuccessResponse(getCommandName());
-            this.setResponseObject(response);
-        } else {
+        if (!result) {
             throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to delete user");
         }
+        SuccessResponse response = new SuccessResponse(getCommandName());
+        this.setResponseObject(response);
     }
 }
diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/vlan/DeleteVlanIpRangeCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/vlan/DeleteVlanIpRangeCmd.java
index 7f583fe225af..7ab0b053004c 100644
--- a/api/src/main/java/org/apache/cloudstack/api/command/admin/vlan/DeleteVlanIpRangeCmd.java
+++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/vlan/DeleteVlanIpRangeCmd.java
@@ -28,7 +28,7 @@
 import com.cloud.user.Account;
 
-@APICommand(name = "deleteVlanIpRange", description = "Creates a VLAN IP range.", responseObject = SuccessResponse.class,
+@APICommand(name = "deleteVlanIpRange", description = "Deletes a VLAN IP range.", responseObject = SuccessResponse.class,
         requestHasSensitiveInfo = false, responseHasSensitiveInfo = false)
 public class DeleteVlanIpRangeCmd extends BaseCmd {
diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/volume/CheckAndRepairVolumeCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/volume/CheckAndRepairVolumeCmd.java
new file mode 100644
index 000000000000..9786a5a14679
--- /dev/null
+++ b/api/src/main/java/org/apache/cloudstack/api/command/user/volume/CheckAndRepairVolumeCmd.java
@@ -0,0 +1,137 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package org.apache.cloudstack.api.command.user.volume;
+
+import java.util.Arrays;
+
+import org.apache.cloudstack.acl.RoleType;
+import org.apache.cloudstack.api.APICommand;
+import org.apache.cloudstack.api.ApiCommandResourceType;
+import org.apache.cloudstack.api.ApiConstants;
+import org.apache.cloudstack.api.ApiErrorCode;
+import org.apache.cloudstack.api.BaseAsyncCmd;
+import org.apache.cloudstack.api.Parameter;
+import org.apache.cloudstack.api.ResponseObject.ResponseView;
+import org.apache.cloudstack.api.ServerApiException;
+import org.apache.cloudstack.api.response.VolumeResponse;
+import org.apache.cloudstack.context.CallContext;
+
+import com.cloud.event.EventTypes;
+import com.cloud.exception.InvalidParameterValueException;
+import com.cloud.exception.ResourceAllocationException;
+import com.cloud.storage.Volume;
+import com.cloud.user.Account;
+import com.cloud.utils.Pair;
+import com.cloud.utils.StringUtils;
+
+@APICommand(name = "checkVolume", description = "Checks a volume for errors or leaks, and repairs it when the repair parameter is passed; currently supported on KVM only", responseObject = VolumeResponse.class, entityType = {Volume.class},
+        since = "4.19.1",
+        authorized = {RoleType.Admin, RoleType.ResourceAdmin, RoleType.DomainAdmin, RoleType.User})
+public class CheckAndRepairVolumeCmd extends BaseAsyncCmd {
+
+    private static final String s_name = "checkandrepairvolumeresponse";
+
+    /////////////////////////////////////////////////////
+    //////////////// API parameters /////////////////////
+    /////////////////////////////////////////////////////
+
+    @Parameter(name = ApiConstants.ID, type = CommandType.UUID, entityType = VolumeResponse.class, required = true, description = "The ID of the volume")
+    private Long id;
+
+    @Parameter(name = ApiConstants.REPAIR, type = CommandType.STRING, required = false, description = "parameter to repair the volume; leaks or all are the possible values")
+    private String repair;
+
+    /////////////////////////////////////////////////////
+    /////////////////// Accessors ///////////////////////
+    /////////////////////////////////////////////////////
+
+    public enum RepairValues {
+        LEAKS, ALL
+    }
+
+    public Long getId() {
+        return id;
+    }
+
+    public String getRepair() {
+        if (org.apache.commons.lang3.StringUtils.isNotEmpty(repair)) {
+            // getEnumIgnoreCase returns null (instead of throwing) for values other than LEAKS/ALL,
+            // so the invalid-parameter path below is actually reachable
+            RepairValues repairType = org.apache.commons.lang3.EnumUtils.getEnumIgnoreCase(RepairValues.class, repair);
+            if (repairType == null) {
+                throw new InvalidParameterValueException(String.format("Repair parameter can only take the following values: %s", Arrays.toString(RepairValues.values())));
+            }
+            return repair.toLowerCase();
+        }
+        return null;
+    }
+
+    /////////////////////////////////////////////////////
+    /////////////// API Implementation///////////////////
+    /////////////////////////////////////////////////////
+
+    @Override
+    public String getCommandName() {
+        return s_name;
+    }
+
+    @Override
+    public long getEntityOwnerId() {
+        Volume volume = _entityMgr.findById(Volume.class, getId());
+        if (volume != null) {
+            return volume.getAccountId();
+        }
+
+        return Account.ACCOUNT_ID_SYSTEM; // no account info given, parent this command to SYSTEM so ERROR events are tracked
+    }
+
+    @Override
+    public String getEventType() {
+        return EventTypes.EVENT_VOLUME_CHECK;
+    }
+
+    @Override
+    public String getEventDescription() {
+        return String.format("check and repair operation on volume: %s", this._uuidMgr.getUuid(Volume.class, getId()));
+    }
+
+    @Override
+    public Long getApiResourceId() {
+        return id;
+    }
+
+    @Override
+    public ApiCommandResourceType getApiResourceType() {
+        return ApiCommandResourceType.Volume;
+    }
+
+    @Override
+    public void execute() throws ResourceAllocationException {
+        CallContext.current().setEventDetails("Volume Id: " + getId());
+        Pair<String, String> result = _volumeService.checkAndRepairVolume(this);
+        Volume volume = _responseGenerator.findVolumeById(getId());
+        if (result != null) {
+            VolumeResponse response = _responseGenerator.createVolumeResponse(ResponseView.Full, volume);
+            response.setVolumeCheckResult(StringUtils.parseJsonToMap(result.first()));
+            if (getRepair() != null) {
+                response.setVolumeRepairResult(StringUtils.parseJsonToMap(result.second()));
+            }
+            response.setResponseName(getCommandName());
+            setResponseObject(response);
+        } else {
+            throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to check and repair the volume");
+        }
+    }
+}
diff --git a/api/src/main/java/org/apache/cloudstack/api/response/VolumeResponse.java b/api/src/main/java/org/apache/cloudstack/api/response/VolumeResponse.java
index 00a1eabc40ba..0d502a6d7a73 100644
--- a/api/src/main/java/org/apache/cloudstack/api/response/VolumeResponse.java
+++ b/api/src/main/java/org/apache/cloudstack/api/response/VolumeResponse.java
@@ -18,6 +18,7 @@
 import java.util.Date;
 import java.util.LinkedHashSet;
+import java.util.Map;
 import java.util.Set;
 
 import org.apache.cloudstack.acl.RoleType;
@@ -288,6 +289,14 @@ public class VolumeResponse extends BaseResponseWithTagInformation implements Co
     @Param(description = "volume uuid that is given by virtualisation provider (only for VMware)")
     private String externalUuid;
 
+    @SerializedName(ApiConstants.VOLUME_CHECK_RESULT)
+    @Param(description = "details for the volume check result; they may vary for different hypervisors", since = "4.19.1")
+    private Map<String, String> volumeCheckResult;
+
+    @SerializedName(ApiConstants.VOLUME_REPAIR_RESULT)
+    @Param(description = "details for the volume repair result; they may vary for different hypervisors", since = "4.19.1")
+    private Map<String, String> volumeRepairResult;
+
     public String getPath() {
         return path;
     }
@@ -817,4 +826,20 @@ public String getExternalUuid() {
     public void setExternalUuid(String externalUuid) {
         this.externalUuid = externalUuid;
     }
+
+    public Map<String, String> getVolumeCheckResult() {
+        return volumeCheckResult;
+    }
+
+    public void setVolumeCheckResult(Map<String, String> volumeCheckResult) {
+        this.volumeCheckResult = volumeCheckResult;
+    }
+
+    public Map<String, String> getVolumeRepairResult() {
+        return volumeRepairResult;
+    }
+
+    public void setVolumeRepairResult(Map<String, String> volumeRepairResult) {
+        this.volumeRepairResult = volumeRepairResult;
+    }
 }
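For orientation (not part of the patch): a hedged sketch of invoking the new checkVolume API and reading the response fields added above. The host name and volume UUID are made up, and the sketch assumes the unauthenticated integration port; production deployments sign requests with an API key/secret instead. Since this is an async command, the immediate response carries a job id to poll with queryAsyncJobResult; the completed job's volume payload includes the volumecheckresult and volumerepairresult maps.

    import java.net.URI;
    import java.net.http.HttpClient;
    import java.net.http.HttpRequest;
    import java.net.http.HttpResponse;

    public class CheckVolumeExample {
        public static void main(String[] args) throws Exception {
            // Hypothetical endpoint and volume UUID; repair=leaks requests a leak-only repair.
            String url = "http://mgmt.example.com:8096/client/api?command=checkVolume"
                    + "&id=9c2a43d1-0d52-4a4e-8a4c-1a3f2e5b6c7d&repair=leaks&response=json";
            HttpResponse<String> resp = HttpClient.newHttpClient()
                    .send(HttpRequest.newBuilder(URI.create(url)).GET().build(),
                            HttpResponse.BodyHandlers.ofString());
            // Prints the async-job submission response (contains the job id to poll).
            System.out.println(resp.body());
        }
    }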
diff --git a/core/src/main/java/com/cloud/agent/api/storage/CheckAndRepairVolumeAnswer.java b/core/src/main/java/com/cloud/agent/api/storage/CheckAndRepairVolumeAnswer.java
new file mode 100644
index 000000000000..3dc7752bfefc
--- /dev/null
+++ b/core/src/main/java/com/cloud/agent/api/storage/CheckAndRepairVolumeAnswer.java
@@ -0,0 +1,57 @@
+//
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+//
+
+package com.cloud.agent.api.storage;
+
+import com.cloud.agent.api.Answer;
+
+public class CheckAndRepairVolumeAnswer extends Answer {
+    private String volumeCheckExecutionResult;
+    private String volumeRepairExecutionResult;
+
+    protected CheckAndRepairVolumeAnswer() {
+        super();
+    }
+
+    public CheckAndRepairVolumeAnswer(CheckAndRepairVolumeCommand cmd, boolean result, String details, String volumeCheckExecutionResult, String volumeRepairExecutionResult) {
+        super(cmd, result, details);
+        this.volumeCheckExecutionResult = volumeCheckExecutionResult;
+        this.volumeRepairExecutionResult = volumeRepairExecutionResult;
+    }
+
+    public CheckAndRepairVolumeAnswer(CheckAndRepairVolumeCommand cmd, boolean result, String details) {
+        super(cmd, result, details);
+    }
+
+    public String getVolumeCheckExecutionResult() {
+        return volumeCheckExecutionResult;
+    }
+
+    public String getVolumeRepairExecutionResult() {
+        return volumeRepairExecutionResult;
+    }
+
+    public void setVolumeCheckExecutionResult(String volumeCheckExecutionResult) {
+        this.volumeCheckExecutionResult = volumeCheckExecutionResult;
+    }
+
+    public void setVolumeRepairExecutionResult(String volumeRepairExecutionResult) {
+        this.volumeRepairExecutionResult = volumeRepairExecutionResult;
+    }
+}
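A note on what the check/repair result strings above hold on KVM: the agent side is expected to shell out to qemu-img check, whose JSON report becomes volumeCheckExecutionResult (and, when repairing, volumeRepairExecutionResult). A minimal, hypothetical stand-in follows; the real agent code goes through CloudStack's Script/QemuImg utilities rather than ProcessBuilder, so this is only a sketch of the underlying invocation.

    import java.io.IOException;
    import java.nio.charset.StandardCharsets;
    import java.util.ArrayList;
    import java.util.List;

    public class QemuImgCheckSketch {
        // Runs "qemu-img check [-r leaks|all] --output=json <path>" and returns the JSON report.
        static String check(String volumePath, String repair) throws IOException, InterruptedException {
            List<String> cmd = new ArrayList<>(List.of("qemu-img", "check"));
            if (repair != null) {
                cmd.add("-r");
                cmd.add(repair); // "leaks" or "all", mirroring CheckAndRepairVolumeCmd.RepairValues
            }
            cmd.add("--output=json");
            cmd.add(volumePath);
            Process p = new ProcessBuilder(cmd).redirectErrorStream(true).start();
            String out = new String(p.getInputStream().readAllBytes(), StandardCharsets.UTF_8);
            p.waitFor();
            return out; // e.g. {"check-errors": 0, "leaks": 0, "total-clusters": ..., ...}
        }
    }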
diff --git a/core/src/main/java/com/cloud/agent/api/storage/CheckAndRepairVolumeCommand.java b/core/src/main/java/com/cloud/agent/api/storage/CheckAndRepairVolumeCommand.java
new file mode 100644
index 000000000000..2553fdf477c5
--- /dev/null
+++ b/core/src/main/java/com/cloud/agent/api/storage/CheckAndRepairVolumeCommand.java
@@ -0,0 +1,77 @@
+//
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+//
+
+package com.cloud.agent.api.storage;
+
+import com.cloud.agent.api.Command;
+import com.cloud.agent.api.LogLevel;
+import com.cloud.agent.api.to.StorageFilerTO;
+
+import java.util.Arrays;
+
+public class CheckAndRepairVolumeCommand extends Command {
+    private String path;
+    private StorageFilerTO pool;
+    private String repair;
+    @LogLevel(LogLevel.Log4jLevel.Off)
+    private byte[] passphrase;
+    private String encryptFormat;
+
+    public CheckAndRepairVolumeCommand(String path, StorageFilerTO pool, String repair, byte[] passphrase, String encryptFormat) {
+        this.path = path;
+        this.pool = pool;
+        this.repair = repair;
+        this.passphrase = passphrase;
+        this.encryptFormat = encryptFormat;
+    }
+
+    public String getPath() {
+        return path;
+    }
+
+    public String getPoolUuid() {
+        return pool.getUuid();
+    }
+
+    public StorageFilerTO getPool() {
+        return pool;
+    }
+
+    public String getRepair() {
+        return repair;
+    }
+
+    public String getEncryptFormat() {
+        return encryptFormat;
+    }
+
+    public byte[] getPassphrase() {
+        return passphrase;
+    }
+
+    public void clearPassphrase() {
+        if (this.passphrase != null) {
+            Arrays.fill(this.passphrase, (byte) 0);
+        }
+    }
+
+    /**
+     * {@inheritDoc}
+     */
+    @Override
+    public boolean executeInSequence() {
+        return false;
+    }
+}
diff --git a/debian/control b/debian/control
index 9fec540975e8..3508c7b5f754 100644
--- a/debian/control
+++ b/debian/control
@@ -17,14 +17,14 @@ Description: A common package which contains files which are shared by several C
 Package: cloudstack-management
 Architecture: all
-Depends: ${python3:Depends}, openjdk-11-jre-headless | java11-runtime-headless | java11-runtime | openjdk-11-jre-headless | zulu-11, cloudstack-common (= ${source:Version}), net-tools, sudo, python3-mysql.connector, augeas-tools, mysql-client | mariadb-client, adduser, bzip2, ipmitool, file, gawk, iproute2, qemu-utils, rng-tools, python3-dnspython, lsb-release, init-system-helpers (>= 1.14~), python3-setuptools
+Depends: ${python3:Depends}, openjdk-17-jre-headless | java17-runtime-headless | java17-runtime | zulu-17, cloudstack-common (= ${source:Version}), net-tools, sudo, python3-mysql.connector, augeas-tools, mysql-client | mariadb-client, adduser, bzip2, ipmitool, file, gawk, iproute2, qemu-utils, rng-tools, python3-dnspython, lsb-release, init-system-helpers (>= 1.14~), python3-setuptools
 Conflicts: cloud-server, cloud-client, cloud-client-ui
 Description: CloudStack server library
  The CloudStack management server
 
 Package: cloudstack-agent
 Architecture: all
-Depends: ${python:Depends}, ${python3:Depends}, openjdk-11-jre-headless | java11-runtime-headless | java11-runtime | openjdk-11-jre-headless | zulu-11, cloudstack-common (= ${source:Version}), lsb-base (>= 9), openssh-client, qemu-kvm (>= 2.5) | qemu-system-x86 (>= 5.2), libvirt-bin (>= 1.3) | libvirt-daemon-system (>= 3.0), iproute2, ebtables, vlan, ipset, python3-libvirt, ethtool, iptables, cryptsetup, rng-tools, lsb-release, ufw, apparmor
+Depends: ${python:Depends}, ${python3:Depends}, openjdk-17-jre-headless | java17-runtime-headless | java17-runtime | zulu-17, cloudstack-common (= ${source:Version}), lsb-base (>= 9), openssh-client, qemu-kvm (>= 2.5) | qemu-system-x86 (>= 5.2), libvirt-bin (>= 1.3) | libvirt-daemon-system (>= 3.0), iproute2, ebtables, vlan, ipset, python3-libvirt, ethtool, iptables, cryptsetup, rng-tools, lsb-release, ufw, apparmor
 Recommends: init-system-helpers
 Conflicts: cloud-agent, cloud-agent-libs, cloud-agent-deps, cloud-agent-scripts
 Description: CloudStack agent
@@ -34,7 +34,7 @@ Description: CloudStack agent
 
 Package: cloudstack-usage
 Architecture: all
-Depends: openjdk-11-jre-headless | java11-runtime-headless | java11-runtime | openjdk-11-jre-headless | zulu-11, cloudstack-common (= ${source:Version}), init-system-helpers
+Depends: openjdk-17-jre-headless | java17-runtime-headless | java17-runtime | zulu-17, cloudstack-common (= ${source:Version}), init-system-helpers
 Description: CloudStack usage monitor
  The CloudStack usage monitor provides usage accounting across the entire cloud
  for cloud operators to charge based on usage parameters.
diff --git a/engine/api/pom.xml b/engine/api/pom.xml
index 65cc4baadeec..1112e6eff8bd 100644
--- a/engine/api/pom.xml
+++ b/engine/api/pom.xml
@@ -71,7 +71,7 @@
     <dependency>
       <groupId>com.sun.xml.bind</groupId>
      <artifactId>jaxb-impl</artifactId>
-      <version>${cs.jaxb.version}</version>
+      <version>${cs.jaxb.impl.version}</version>
     </dependency>
diff --git a/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/PrimaryDataStoreDriver.java b/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/PrimaryDataStoreDriver.java
index f5812a106b8e..49a7f8c0fa63 100644
--- a/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/PrimaryDataStoreDriver.java
+++ b/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/PrimaryDataStoreDriver.java
@@ -44,6 +44,10 @@ enum QualityOfServiceState { MIGRATION, NO_MIGRATION }
 
     void revokeAccess(DataObject dataObject, Host host, DataStore dataStore);
 
+    default boolean requiresAccessForMigration(DataObject dataObject) {
+        return false;
+    }
+
     /**
      * intended for managed storage (cloud.storage_pool.managed = true)
      * if not managed, return volume.getSize()
diff --git a/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/VolumeService.java b/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/VolumeService.java
index 50aee83f4979..7c4d56e12b92 100644
--- a/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/VolumeService.java
+++ b/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/VolumeService.java
@@ -57,6 +57,8 @@ public VolumeInfo getVolume() {
 
     void revokeAccess(DataObject dataObject, Host host, DataStore dataStore);
 
+    boolean requiresAccessForMigration(DataObject dataObject, DataStore dataStore);
+
     /**
      * Creates the volume based on the given criteria
      *
@@ -115,4 +117,8 @@ boolean copyPoliciesBetweenVolumesAndDestroySourceVolumeAfterMigration(ObjectInD
             VolumeInfo sourceVolume, VolumeInfo destinationVolume, boolean retryExpungeVolumeAsync);
 
     void moveVolumeOnSecondaryStorageToAnotherAccount(Volume volume, Account sourceAccount, Account destAccount);
+
+    Pair<String, String> checkAndRepairVolume(VolumeInfo volume);
+
+    void checkAndRepairVolumeBasedOnConfig(DataObject dataObject, Host host);
 }
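The default method added to PrimaryDataStoreDriver above lets individual datastore drivers opt in to access grants during migration; per the VolumeOrchestrator comments later in this patch, the PowerFlex driver is the one expected to return true. A self-contained sketch of the pattern, using hypothetical stand-in types rather than the real driver interface:

    // Sketch only: shows how a default-method hook lets one driver opt in
    // while every other implementation inherits "false" unchanged.
    interface DriverSketch {
        default boolean requiresAccessForMigration(Object dataObject) {
            return false;
        }
    }

    public class RequiresAccessDemo {
        public static void main(String[] args) {
            DriverSketch generic = new DriverSketch() { };                      // inherits default: false
            DriverSketch powerFlexLike = new DriverSketch() {
                @Override
                public boolean requiresAccessForMigration(Object dataObject) { // opts in: true
                    return true;
                }
            };
            System.out.println(generic.requiresAccessForMigration(null));       // false
            System.out.println(powerFlexLike.requiresAccessForMigration(null)); // true
        }
    }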
diff --git a/engine/components-api/src/main/java/com/cloud/vm/VmWorkCheckAndRepairVolume.java b/engine/components-api/src/main/java/com/cloud/vm/VmWorkCheckAndRepairVolume.java
new file mode 100644
index 000000000000..eaee4d19eb38
--- /dev/null
+++ b/engine/components-api/src/main/java/com/cloud/vm/VmWorkCheckAndRepairVolume.java
@@ -0,0 +1,42 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package com.cloud.vm;
+
+public class VmWorkCheckAndRepairVolume extends VmWork {
+
+    private static final long serialVersionUID = 341816293003023824L;
+
+    private Long volumeId;
+
+    private String repair;
+
+    public VmWorkCheckAndRepairVolume(long userId, long accountId, long vmId, String handlerName,
+            Long volumeId, String repair) {
+        super(userId, accountId, vmId, handlerName);
+        this.repair = repair;
+        this.volumeId = volumeId;
+    }
+
+    public Long getVolumeId() {
+        return volumeId;
+    }
+
+    public String getRepair() {
+        return repair;
+    }
+}
diff --git a/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachineManagerImpl.java b/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachineManagerImpl.java
index b5fc2e7c3a91..873993892714 100755
--- a/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachineManagerImpl.java
+++ b/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachineManagerImpl.java
@@ -165,6 +165,7 @@
 import com.cloud.deploy.DeploymentPlanner;
 import com.cloud.deploy.DeploymentPlanner.ExcludeList;
 import com.cloud.deploy.DeploymentPlanningManager;
+import com.cloud.deploy.DeploymentPlanningManagerImpl;
 import com.cloud.deployasis.dao.UserVmDeployAsIsDetailsDao;
 import com.cloud.event.EventTypes;
 import com.cloud.event.UsageEventUtils;
@@ -212,8 +213,8 @@
 import com.cloud.service.dao.ServiceOfferingDao;
 import com.cloud.storage.DiskOfferingVO;
 import com.cloud.storage.ScopeType;
-import com.cloud.storage.Storage.ImageFormat;
 import com.cloud.storage.Storage;
+import com.cloud.storage.Storage.ImageFormat;
 import com.cloud.storage.StorageManager;
 import com.cloud.storage.StoragePool;
 import com.cloud.storage.VMTemplateVO;
@@ -237,6 +238,7 @@
 import com.cloud.uservm.UserVm;
 import com.cloud.utils.DateUtil;
 import com.cloud.utils.Journal;
+import com.cloud.utils.LogUtils;
 import com.cloud.utils.Pair;
 import com.cloud.utils.Predicate;
 import com.cloud.utils.ReflectionUse;
@@ -1093,6 +1095,7 @@ protected void checkAndAttemptMigrateVmAcrossCluster(final VMInstanceVO vm, fina
     public void orchestrateStart(final String vmUuid, final Map<VirtualMachineProfile.Param, Object> params, final DeploymentPlan planToDeploy, final DeploymentPlanner planner)
             throws InsufficientCapacityException, ConcurrentOperationException, ResourceUnavailableException {
+        logger.debug(() -> LogUtils.logGsonWithoutException("Trying to start VM [%s] using plan [%s] and planner [%s].", vmUuid, planToDeploy, planner));
         final CallContext cctxt = CallContext.current();
         final Account account = cctxt.getCallingAccount();
         final User caller = cctxt.getCallingUser();
@@ -1116,10 +1119,8 @@ public void orchestrateStart(final String vmUuid, final Map<VirtualMachineProfile.Param, Object> params,
+            logger.debug(() -> DeploymentPlanningManagerImpl.logDeploymentWithoutException(finalVm, planToDeploy, planToDeploy.getAvoids(), planner));
             plan = new DataCenterDeployment(planToDeploy.getDataCenterId(), planToDeploy.getPodId(), planToDeploy.getClusterId(), planToDeploy.getHostId(),
                     planToDeploy.getPoolId(), planToDeploy.getPhysicalNetworkId(), ctx);
@@ -1140,13 +1141,12 @@ public void orchestrateStart(final String vmUuid, final Map<VirtualMachineProfile.Param, Object> params,
+                logger.debug(() -> LogUtils.logGsonWithoutException("Avoiding components [%s] in deployment of VM [%s].", finalAvoids, vmUuid));
             }
             if (avoids == null) {
                 avoids = new ExcludeList();
             }
-            if (logger.isDebugEnabled()) {
-                logger.debug("Deploy avoids pods: " + avoids.getPodsToAvoid() + ", clusters: " + avoids.getClustersToAvoid() + ", hosts: " + avoids.getHostsToAvoid());
-            }
 
             boolean planChangedByVolume = false;
             boolean reuseVolume = true;
@@ -2206,6 +2206,8 @@ private void advanceStop(final VMInstanceVO vm, final boolean cleanUpEvenIfUnabl
             boolean result = stateTransitTo(vm, Event.OperationSucceeded, null);
             if (result) {
+                vm.setPowerState(PowerState.PowerOff);
+                _vmDao.update(vm.getId(), vm);
                 if (VirtualMachine.Type.User.equals(vm.type) && ResourceCountRunningVMsonly.value()) {
                     ServiceOfferingVO offering = _offeringDao.findById(vm.getId(), vm.getServiceOfferingId());
                     VMTemplateVO template = _templateDao.findByIdIncludingRemoved(vm.getTemplateId());
@@ -2760,6 +2762,7 @@ protected void migrate(final VMInstanceVO vm, final long srcHostId, final Deploy
         }
 
         vm.setLastHostId(srcHostId);
+        _vmDao.resetVmPowerStateTracking(vm.getId());
         try {
             if (vm.getHostId() == null || vm.getHostId() != srcHostId || !changeState(vm, Event.MigrationRequested, dstHostId, work, Step.Migrating)) {
                 _networkMgr.rollbackNicForMigration(vmSrc, profile);
diff --git a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/VolumeOrchestrator.java b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/VolumeOrchestrator.java
index 3985fae7f5e2..2d24cd7c5fa1 100644
--- a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/VolumeOrchestrator.java
+++ b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/VolumeOrchestrator.java
@@ -1230,8 +1230,8 @@ public void release(long vmId, long hostId) {
             DataStore dataStore = dataStoreMgr.getDataStore(volumeForVm.getPoolId(), DataStoreRole.Primary);
             PrimaryDataStore primaryDataStore = (PrimaryDataStore)dataStore;
 
-            // This might impact other managed storages, grant access for PowerFlex storage pool only
-            if (primaryDataStore.isManaged() && primaryDataStore.getPoolType() == Storage.StoragePoolType.PowerFlex) {
+            // This might impact other managed storages; enable requiresAccessForMigration in the relevant datastore driver (currently enabled for the PowerFlex storage pool only)
+            if (primaryDataStore.isManaged() && volService.requiresAccessForMigration(volumeInfo, dataStore)) {
                 volService.revokeAccess(volumeInfo, host, dataStore);
             }
         }
@@ -1509,8 +1509,8 @@ public void prepareForMigration(VirtualMachineProfile vm, DeployDestination dest
             disk.setDetails(getDetails(volumeInfo, dataStore));
 
             PrimaryDataStore primaryDataStore = (PrimaryDataStore)dataStore;
-            // This might impact other managed storages, grant access for PowerFlex storage pool only
-            if (primaryDataStore.isManaged() && primaryDataStore.getPoolType() == Storage.StoragePoolType.PowerFlex) {
+            // This might impact other managed storages; enable requiresAccessForMigration in the relevant datastore driver (currently enabled for the PowerFlex storage pool only)
+            if (primaryDataStore.isManaged() && volService.requiresAccessForMigration(volumeInfo, dataStore)) {
                 volService.grantAccess(volFactory.getVolume(vol.getId()), dest.getHost(), dataStore);
             }
 
@@ -1917,6 +1917,8 @@ public void prepare(VirtualMachineProfile vm, DeployDestination dest) throws Sto
                     }
                 }
             }
+            } else {
+                handleCheckAndRepairVolume(vol, vm.getVirtualMachine().getHostId());
             }
         } else if (task.type == VolumeTaskType.MIGRATE) {
             pool =
(StoragePool)dataStoreMgr.getDataStore(task.pool.getId(), DataStoreRole.Primary); @@ -1959,6 +1961,16 @@ public void prepare(VirtualMachineProfile vm, DeployDestination dest) throws Sto } } + private void handleCheckAndRepairVolume(Volume vol, Long hostId) { + Host host = _hostDao.findById(hostId); + try { + volService.checkAndRepairVolumeBasedOnConfig(volFactory.getVolume(vol.getId()), host); + } catch (Exception e) { + String volumeToString = getReflectOnlySelectedFields(vol); + logger.debug(String.format("Unable to check and repair volume [%s] on host [%s], due to %s.", volumeToString, host, e.getMessage())); + } + } + private boolean stateTransitTo(Volume vol, Volume.Event event) throws NoTransitionException { return _volStateMachine.transitTo(vol, event, null, _volsDao); } diff --git a/engine/orchestration/src/test/java/com/cloud/vm/VirtualMachineManagerImplTest.java b/engine/orchestration/src/test/java/com/cloud/vm/VirtualMachineManagerImplTest.java index 82ddce10958e..e357c062c171 100644 --- a/engine/orchestration/src/test/java/com/cloud/vm/VirtualMachineManagerImplTest.java +++ b/engine/orchestration/src/test/java/com/cloud/vm/VirtualMachineManagerImplTest.java @@ -1131,7 +1131,7 @@ public void testIsDiskOfferingSuitableForVmSuccess() { poolListMock.add(storagePoolVoMock); Mockito.doReturn(poolListMock).when(storagePoolAllocatorMock).allocateToPool(any(DiskProfile.class), any(VirtualMachineProfile.class), any(DeploymentPlan.class), any(ExcludeList.class), Mockito.eq(1)); - boolean result = virtualMachineManagerImpl.isDiskOfferingSuitableForVm(vmInstanceMock, virtualMachineProfileMock, 1L, 1L,1L, 1L); + boolean result = virtualMachineManagerImpl.isDiskOfferingSuitableForVm(vmInstanceMock, virtualMachineProfileMock, 1L, 1L, 1L, 1L); assertTrue(result); } @@ -1140,7 +1140,7 @@ public void testIsDiskOfferingSuitableForVmNegative() { Mockito.doReturn(Mockito.mock(DiskOfferingVO.class)).when(diskOfferingDaoMock).findById(anyLong()); Mockito.doReturn(new ArrayList<>()).when(storagePoolAllocatorMock).allocateToPool(any(DiskProfile.class), any(VirtualMachineProfile.class), any(DeploymentPlan.class), any(ExcludeList.class), Mockito.eq(1)); - boolean result = virtualMachineManagerImpl.isDiskOfferingSuitableForVm(vmInstanceMock, virtualMachineProfileMock, 1L, 1L,1L, 1L); + boolean result = virtualMachineManagerImpl.isDiskOfferingSuitableForVm(vmInstanceMock, virtualMachineProfileMock, 1L, 1L, 1L, 1L); assertFalse(result); } diff --git a/engine/schema/pom.xml b/engine/schema/pom.xml index a39608455f73..666c25fdf363 100644 --- a/engine/schema/pom.xml +++ b/engine/schema/pom.xml @@ -101,8 +101,10 @@ for (template in templateList) { def data = lines.findAll { it.contains(template) } if (data != null) { - def hypervisor = template.tokenize('-')[-1] - pom.properties["$hypervisor" + ".checksum"] = data[0].tokenize(' ')[0] + if (data.size() > 0) { + def hypervisor = template.tokenize('-')[-1] + pom.properties["$hypervisor" + ".checksum"] = data[0].tokenize(' ')[0] + } } } diff --git a/engine/schema/src/main/java/com/cloud/usage/UsageVO.java b/engine/schema/src/main/java/com/cloud/usage/UsageVO.java index 10b295f593cf..50884e3c0120 100644 --- a/engine/schema/src/main/java/com/cloud/usage/UsageVO.java +++ b/engine/schema/src/main/java/com/cloud/usage/UsageVO.java @@ -17,6 +17,7 @@ package com.cloud.usage; import java.util.Date; +import java.util.TimeZone; import javax.persistence.Column; import javax.persistence.Entity; @@ -27,9 +28,11 @@ import javax.persistence.Temporal; import 
javax.persistence.TemporalType; +import com.cloud.utils.DateUtil; import org.apache.cloudstack.api.InternalIdentity; import org.apache.cloudstack.usage.Usage; import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; +import org.apache.commons.lang3.StringUtils; @Entity @Table(name = "cloud_usage") @@ -400,6 +403,12 @@ public void setHidden(boolean hidden) { @Override public String toString() { - return ReflectionToStringBuilderUtils.reflectOnlySelectedFields(this, "id", "usageId", "usageType", "startDate", "endDate"); + return ReflectionToStringBuilderUtils.reflectOnlySelectedFields(this, "id", "usageId", "usageType"); + } + + public String toString(TimeZone timeZone) { + String startDateString = DateUtil.displayDateInTimezone(timeZone, getStartDate()); + String endDateString = DateUtil.displayDateInTimezone(timeZone, getEndDate()); + return String.format("%s,\"startDate\":\"%s\",\"endDate\":\"%s\"}", StringUtils.chop(this.toString()), startDateString, endDateString); } } diff --git a/engine/schema/src/main/java/com/cloud/vm/dao/VMInstanceDaoImpl.java b/engine/schema/src/main/java/com/cloud/vm/dao/VMInstanceDaoImpl.java index cc82813b412e..b7b787b00451 100755 --- a/engine/schema/src/main/java/com/cloud/vm/dao/VMInstanceDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/vm/dao/VMInstanceDaoImpl.java @@ -64,7 +64,7 @@ @Component public class VMInstanceDaoImpl extends GenericDaoBase implements VMInstanceDao { - private static final int MAX_CONSECUTIVE_SAME_STATE_UPDATE_COUNT = 3; + static final int MAX_CONSECUTIVE_SAME_STATE_UPDATE_COUNT = 3; protected SearchBuilder VMClusterSearch; protected SearchBuilder LHVMClusterSearch; @@ -895,17 +895,19 @@ public List listStartingWithNoHostId() { @Override public boolean updatePowerState(final long instanceId, final long powerHostId, final VirtualMachine.PowerState powerState, Date wisdomEra) { - return Transaction.execute(new TransactionCallback() { + return Transaction.execute(new TransactionCallback<>() { @Override public Boolean doInTransaction(TransactionStatus status) { boolean needToUpdate = false; VMInstanceVO instance = findById(instanceId); if (instance != null - && (null == instance.getPowerStateUpdateTime() + && (null == instance.getPowerStateUpdateTime() || instance.getPowerStateUpdateTime().before(wisdomEra))) { Long savedPowerHostId = instance.getPowerHostId(); - if (instance.getPowerState() != powerState || savedPowerHostId == null - || savedPowerHostId.longValue() != powerHostId) { + if (instance.getPowerState() != powerState + || savedPowerHostId == null + || savedPowerHostId != powerHostId + || !isPowerStateInSyncWithInstanceState(powerState, powerHostId, instance)) { instance.setPowerState(powerState); instance.setPowerHostId(powerHostId); instance.setPowerStateUpdateCount(1); @@ -927,6 +929,17 @@ public Boolean doInTransaction(TransactionStatus status) { }); } + private boolean isPowerStateInSyncWithInstanceState(final VirtualMachine.PowerState powerState, final long powerHostId, final VMInstanceVO instance) { + State instanceState = instance.getState(); + if ((powerState == VirtualMachine.PowerState.PowerOff && instanceState == State.Running) + || (powerState == VirtualMachine.PowerState.PowerOn && instanceState == State.Stopped)) { + logger.debug(String.format("VM id: %d on host id: %d and power host id: %d is in %s state, but power state is %s", + instance.getId(), instance.getHostId(), powerHostId, instanceState, powerState)); + return false; + } + return true; + } + @Override 
public boolean isPowerStateUpToDate(final long instanceId) { VMInstanceVO instance = findById(instanceId); diff --git a/engine/schema/src/main/java/org/apache/cloudstack/reservation/dao/ReservationDaoImpl.java b/engine/schema/src/main/java/org/apache/cloudstack/reservation/dao/ReservationDaoImpl.java index 5239aa50ad0e..011262afe392 100644 --- a/engine/schema/src/main/java/org/apache/cloudstack/reservation/dao/ReservationDaoImpl.java +++ b/engine/schema/src/main/java/org/apache/cloudstack/reservation/dao/ReservationDaoImpl.java @@ -59,7 +59,7 @@ public ReservationDaoImpl() { listDomainAndTypeSearch.done(); listDomainAndTypeAndNoTagSearch = createSearchBuilder(); - listDomainAndTypeAndNoTagSearch.and(ACCOUNT_ID, listDomainAndTypeAndNoTagSearch.entity().getAccountId(), SearchCriteria.Op.EQ); + listDomainAndTypeAndNoTagSearch.and(DOMAIN_ID, listDomainAndTypeAndNoTagSearch.entity().getDomainId(), SearchCriteria.Op.EQ); listDomainAndTypeAndNoTagSearch.and(RESOURCE_TYPE, listDomainAndTypeAndNoTagSearch.entity().getResourceType(), SearchCriteria.Op.EQ); listDomainAndTypeAndNoTagSearch.and(RESOURCE_TAG, listDomainAndTypeAndNoTagSearch.entity().getTag(), SearchCriteria.Op.NULL); listDomainAndTypeAndNoTagSearch.done(); diff --git a/engine/schema/src/main/resources/META-INF/db/schema-41900to42000.sql b/engine/schema/src/main/resources/META-INF/db/schema-41900to42000.sql index 87925e89740c..5c963c074165 100644 --- a/engine/schema/src/main/resources/META-INF/db/schema-41900to42000.sql +++ b/engine/schema/src/main/resources/META-INF/db/schema-41900to42000.sql @@ -29,6 +29,12 @@ DROP INDEX `i_resource_count__type_domaintId`, ADD UNIQUE INDEX `i_resource_count__type_tag_accountId` (`type`,`tag`,`account_id`), ADD UNIQUE INDEX `i_resource_count__type_tag_domaintId` (`type`,`tag`,`domain_id`); +-- Update Default System offering for Router to 512MiB +UPDATE `cloud`.`service_offering` SET ram_size = 512 WHERE unique_name IN ("Cloud.Com-SoftwareRouter", "Cloud.Com-SoftwareRouter-Local", + "Cloud.Com-InternalLBVm", "Cloud.Com-InternalLBVm-Local", + "Cloud.Com-ElasticLBVm", "Cloud.Com-ElasticLBVm-Local") + AND system_use = 1 AND ram_size < 512; + -- Create command_timeout table and populate it CREATE TABLE IF NOT EXISTS `cloud`.`command_timeout` ( id bigint(20) unsigned not null auto_increment primary key, @@ -48,4 +54,4 @@ VALUES ('com.cloud.agent.api.CheckHealthCommand', 50, now(), now()), ('com.cloud.agent.api.routing.GetAutoScaleMetricsCommand', 30, now(), now()), ('org.apache.cloudstack.ca.SetupKeyStoreCommand', 30, now(), now()), - ('org.apache.cloudstack.storage.command.browser.ListDataStoreObjectsCommand', 15, now(), now()); \ No newline at end of file + ('org.apache.cloudstack.storage.command.browser.ListDataStoreObjectsCommand', 15, now(), now()); diff --git a/engine/schema/src/main/resources/META-INF/db/views/cloud.account_netstats_view.sql b/engine/schema/src/main/resources/META-INF/db/views/cloud.account_netstats_view.sql new file mode 100644 index 000000000000..11193c465fd7 --- /dev/null +++ b/engine/schema/src/main/resources/META-INF/db/views/cloud.account_netstats_view.sql @@ -0,0 +1,31 @@ +-- Licensed to the Apache Software Foundation (ASF) under one +-- or more contributor license agreements. See the NOTICE file +-- distributed with this work for additional information +-- regarding copyright ownership. The ASF licenses this file +-- to you under the Apache License, Version 2.0 (the +-- "License"); you may not use this file except in compliance +-- with the License. 
You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, +-- software distributed under the License is distributed on an +-- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +-- KIND, either express or implied. See the License for the +-- specific language governing permissions and limitations +-- under the License. + +-- cloud.account_netstats_view source + + +DROP VIEW IF EXISTS `cloud`.`account_netstats_view`; + +CREATE VIEW `cloud`.`account_netstats_view` AS +select + `user_statistics`.`account_id` AS `account_id`, + (sum(`user_statistics`.`net_bytes_received`) + sum(`user_statistics`.`current_bytes_received`)) AS `bytesReceived`, + (sum(`user_statistics`.`net_bytes_sent`) + sum(`user_statistics`.`current_bytes_sent`)) AS `bytesSent` +from + `user_statistics` +group by + `user_statistics`.`account_id`; diff --git a/engine/schema/src/main/resources/META-INF/db/views/cloud.account_vmstats_view.sql b/engine/schema/src/main/resources/META-INF/db/views/cloud.account_vmstats_view.sql new file mode 100644 index 000000000000..df6a216b0f8e --- /dev/null +++ b/engine/schema/src/main/resources/META-INF/db/views/cloud.account_vmstats_view.sql @@ -0,0 +1,35 @@ +-- Licensed to the Apache Software Foundation (ASF) under one +-- or more contributor license agreements. See the NOTICE file +-- distributed with this work for additional information +-- regarding copyright ownership. The ASF licenses this file +-- to you under the Apache License, Version 2.0 (the +-- "License"); you may not use this file except in compliance +-- with the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, +-- software distributed under the License is distributed on an +-- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +-- KIND, either express or implied. See the License for the +-- specific language governing permissions and limitations +-- under the License. + +-- cloud.account_vmstats_view source + + +DROP VIEW IF EXISTS `cloud`.`account_vmstats_view`; + +CREATE VIEW `cloud`.`account_vmstats_view` AS +select + `vm_instance`.`account_id` AS `account_id`, + `vm_instance`.`state` AS `state`, + count(0) AS `vmcount` +from + `vm_instance` +where + ((`vm_instance`.`vm_type` = 'User') + and (`vm_instance`.`removed` is null)) +group by + `vm_instance`.`account_id`, + `vm_instance`.`state`; diff --git a/engine/schema/src/main/resources/META-INF/db/views/cloud.affinity_group_view.sql b/engine/schema/src/main/resources/META-INF/db/views/cloud.affinity_group_view.sql new file mode 100644 index 000000000000..90a398e1ec5c --- /dev/null +++ b/engine/schema/src/main/resources/META-INF/db/views/cloud.affinity_group_view.sql @@ -0,0 +1,60 @@ +-- Licensed to the Apache Software Foundation (ASF) under one +-- or more contributor license agreements. See the NOTICE file +-- distributed with this work for additional information +-- regarding copyright ownership. The ASF licenses this file +-- to you under the Apache License, Version 2.0 (the +-- "License"); you may not use this file except in compliance +-- with the License. 
You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, +-- software distributed under the License is distributed on an +-- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +-- KIND, either express or implied. See the License for the +-- specific language governing permissions and limitations +-- under the License. + +-- cloud.affinity_group_view source + + +DROP VIEW IF EXISTS `cloud`.`affinity_group_view`; + +CREATE VIEW `cloud`.`affinity_group_view` AS +select + `affinity_group`.`id` AS `id`, + `affinity_group`.`name` AS `name`, + `affinity_group`.`type` AS `type`, + `affinity_group`.`description` AS `description`, + `affinity_group`.`uuid` AS `uuid`, + `affinity_group`.`acl_type` AS `acl_type`, + `account`.`id` AS `account_id`, + `account`.`uuid` AS `account_uuid`, + `account`.`account_name` AS `account_name`, + `account`.`type` AS `account_type`, + `domain`.`id` AS `domain_id`, + `domain`.`uuid` AS `domain_uuid`, + `domain`.`name` AS `domain_name`, + `domain`.`path` AS `domain_path`, + `projects`.`id` AS `project_id`, + `projects`.`uuid` AS `project_uuid`, + `projects`.`name` AS `project_name`, + `vm_instance`.`id` AS `vm_id`, + `vm_instance`.`uuid` AS `vm_uuid`, + `vm_instance`.`name` AS `vm_name`, + `vm_instance`.`state` AS `vm_state`, + `user_vm`.`display_name` AS `vm_display_name` +from + ((((((`affinity_group` +join `account` on + ((`affinity_group`.`account_id` = `account`.`id`))) +join `domain` on + ((`affinity_group`.`domain_id` = `domain`.`id`))) +left join `projects` on + ((`projects`.`project_account_id` = `account`.`id`))) +left join `affinity_group_vm_map` on + ((`affinity_group`.`id` = `affinity_group_vm_map`.`affinity_group_id`))) +left join `vm_instance` on + ((`vm_instance`.`id` = `affinity_group_vm_map`.`instance_id`))) +left join `user_vm` on + ((`user_vm`.`id` = `vm_instance`.`id`))); diff --git a/engine/schema/src/main/resources/META-INF/db/views/cloud.domain_view.sql b/engine/schema/src/main/resources/META-INF/db/views/cloud.domain_view.sql index af885b9413ff..201ece95023a 100644 --- a/engine/schema/src/main/resources/META-INF/db/views/cloud.domain_view.sql +++ b/engine/schema/src/main/resources/META-INF/db/views/cloud.domain_view.sql @@ -18,6 +18,7 @@ -- VIEW `cloud`.`domain_view`; DROP VIEW IF EXISTS `cloud`.`domain_view`; + CREATE VIEW `cloud`.`domain_view` AS select `domain`.`id` AS `id`, diff --git a/engine/schema/src/main/resources/META-INF/db/views/cloud.event_view.sql b/engine/schema/src/main/resources/META-INF/db/views/cloud.event_view.sql new file mode 100644 index 000000000000..0a15ae4c0c91 --- /dev/null +++ b/engine/schema/src/main/resources/META-INF/db/views/cloud.event_view.sql @@ -0,0 +1,63 @@ +-- Licensed to the Apache Software Foundation (ASF) under one +-- or more contributor license agreements. See the NOTICE file +-- distributed with this work for additional information +-- regarding copyright ownership. The ASF licenses this file +-- to you under the Apache License, Version 2.0 (the +-- "License"); you may not use this file except in compliance +-- with the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, +-- software distributed under the License is distributed on an +-- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +-- KIND, either express or implied. 
See the License for the +-- specific language governing permissions and limitations +-- under the License. + +-- cloud.event_view source + + +DROP VIEW IF EXISTS `cloud`.`event_view`; + +CREATE VIEW `cloud`.`event_view` AS +select + `event`.`id` AS `id`, + `event`.`uuid` AS `uuid`, + `event`.`type` AS `type`, + `event`.`state` AS `state`, + `event`.`description` AS `description`, + `event`.`resource_id` AS `resource_id`, + `event`.`resource_type` AS `resource_type`, + `event`.`created` AS `created`, + `event`.`level` AS `level`, + `event`.`parameters` AS `parameters`, + `event`.`start_id` AS `start_id`, + `eve`.`uuid` AS `start_uuid`, + `event`.`user_id` AS `user_id`, + `event`.`archived` AS `archived`, + `event`.`display` AS `display`, + `user`.`username` AS `user_name`, + `account`.`id` AS `account_id`, + `account`.`uuid` AS `account_uuid`, + `account`.`account_name` AS `account_name`, + `account`.`type` AS `account_type`, + `domain`.`id` AS `domain_id`, + `domain`.`uuid` AS `domain_uuid`, + `domain`.`name` AS `domain_name`, + `domain`.`path` AS `domain_path`, + `projects`.`id` AS `project_id`, + `projects`.`uuid` AS `project_uuid`, + `projects`.`name` AS `project_name` +from + (((((`event` +join `account` on + ((`event`.`account_id` = `account`.`id`))) +join `domain` on + ((`event`.`domain_id` = `domain`.`id`))) +join `user` on + ((`event`.`user_id` = `user`.`id`))) +left join `projects` on + ((`projects`.`project_account_id` = `event`.`account_id`))) +left join `event` `eve` on + ((`event`.`start_id` = `eve`.`id`))); diff --git a/engine/schema/src/main/resources/META-INF/db/views/cloud.free_ip_view.sql b/engine/schema/src/main/resources/META-INF/db/views/cloud.free_ip_view.sql new file mode 100644 index 000000000000..29c22f332154 --- /dev/null +++ b/engine/schema/src/main/resources/META-INF/db/views/cloud.free_ip_view.sql @@ -0,0 +1,32 @@ +-- Licensed to the Apache Software Foundation (ASF) under one +-- or more contributor license agreements. See the NOTICE file +-- distributed with this work for additional information +-- regarding copyright ownership. The ASF licenses this file +-- to you under the Apache License, Version 2.0 (the +-- "License"); you may not use this file except in compliance +-- with the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, +-- software distributed under the License is distributed on an +-- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +-- KIND, either express or implied. See the License for the +-- specific language governing permissions and limitations +-- under the License. + +-- cloud.free_ip_view source + + +DROP VIEW IF EXISTS `cloud`.`free_ip_view`; + +CREATE VIEW `cloud`.`free_ip_view` AS +select + count(`user_ip_address`.`id`) AS `free_ip` +from + (`user_ip_address` +join `vlan` on + (((`vlan`.`id` = `user_ip_address`.`vlan_db_id`) + and (`vlan`.`vlan_type` = 'VirtualNetwork')))) +where + (`user_ip_address`.`state` = 'Free'); diff --git a/engine/schema/src/main/resources/META-INF/db/views/cloud.image_store_view.sql b/engine/schema/src/main/resources/META-INF/db/views/cloud.image_store_view.sql new file mode 100644 index 000000000000..88d68302d4cb --- /dev/null +++ b/engine/schema/src/main/resources/META-INF/db/views/cloud.image_store_view.sql @@ -0,0 +1,45 @@ +-- Licensed to the Apache Software Foundation (ASF) under one +-- or more contributor license agreements. 
See the NOTICE file +-- distributed with this work for additional information +-- regarding copyright ownership. The ASF licenses this file +-- to you under the Apache License, Version 2.0 (the +-- "License"); you may not use this file except in compliance +-- with the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, +-- software distributed under the License is distributed on an +-- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +-- KIND, either express or implied. See the License for the +-- specific language governing permissions and limitations +-- under the License. + +-- cloud.image_store_view source + + +DROP VIEW IF EXISTS `cloud`.`image_store_view`; + +CREATE VIEW `cloud`.`image_store_view` AS +select + `image_store`.`id` AS `id`, + `image_store`.`uuid` AS `uuid`, + `image_store`.`name` AS `name`, + `image_store`.`image_provider_name` AS `image_provider_name`, + `image_store`.`protocol` AS `protocol`, + `image_store`.`url` AS `url`, + `image_store`.`scope` AS `scope`, + `image_store`.`role` AS `role`, + `image_store`.`readonly` AS `readonly`, + `image_store`.`removed` AS `removed`, + `data_center`.`id` AS `data_center_id`, + `data_center`.`uuid` AS `data_center_uuid`, + `data_center`.`name` AS `data_center_name`, + `image_store_details`.`name` AS `detail_name`, + `image_store_details`.`value` AS `detail_value` +from + ((`image_store` +left join `data_center` on + ((`image_store`.`data_center_id` = `data_center`.`id`))) +left join `image_store_details` on + ((`image_store_details`.`store_id` = `image_store`.`id`))); diff --git a/engine/schema/src/main/resources/META-INF/db/views/cloud.instance_group_view.sql b/engine/schema/src/main/resources/META-INF/db/views/cloud.instance_group_view.sql new file mode 100644 index 000000000000..8bdc81847181 --- /dev/null +++ b/engine/schema/src/main/resources/META-INF/db/views/cloud.instance_group_view.sql @@ -0,0 +1,48 @@ +-- Licensed to the Apache Software Foundation (ASF) under one +-- or more contributor license agreements. See the NOTICE file +-- distributed with this work for additional information +-- regarding copyright ownership. The ASF licenses this file +-- to you under the Apache License, Version 2.0 (the +-- "License"); you may not use this file except in compliance +-- with the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, +-- software distributed under the License is distributed on an +-- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +-- KIND, either express or implied. See the License for the +-- specific language governing permissions and limitations +-- under the License. 
+ +-- cloud.instance_group_view source + + +DROP VIEW IF EXISTS `cloud`.`instance_group_view`; + +CREATE VIEW `cloud`.`instance_group_view` AS +select + `instance_group`.`id` AS `id`, + `instance_group`.`uuid` AS `uuid`, + `instance_group`.`name` AS `name`, + `instance_group`.`removed` AS `removed`, + `instance_group`.`created` AS `created`, + `account`.`id` AS `account_id`, + `account`.`uuid` AS `account_uuid`, + `account`.`account_name` AS `account_name`, + `account`.`type` AS `account_type`, + `domain`.`id` AS `domain_id`, + `domain`.`uuid` AS `domain_uuid`, + `domain`.`name` AS `domain_name`, + `domain`.`path` AS `domain_path`, + `projects`.`id` AS `project_id`, + `projects`.`uuid` AS `project_uuid`, + `projects`.`name` AS `project_name` +from + (((`instance_group` +join `account` on + ((`instance_group`.`account_id` = `account`.`id`))) +join `domain` on + ((`account`.`domain_id` = `domain`.`id`))) +left join `projects` on + ((`projects`.`project_account_id` = `instance_group`.`account_id`))); diff --git a/engine/schema/src/main/resources/META-INF/db/views/cloud.last_annotation_view.sql b/engine/schema/src/main/resources/META-INF/db/views/cloud.last_annotation_view.sql new file mode 100644 index 000000000000..f317fbacc4d5 --- /dev/null +++ b/engine/schema/src/main/resources/META-INF/db/views/cloud.last_annotation_view.sql @@ -0,0 +1,43 @@ +-- Licensed to the Apache Software Foundation (ASF) under one +-- or more contributor license agreements. See the NOTICE file +-- distributed with this work for additional information +-- regarding copyright ownership. The ASF licenses this file +-- to you under the Apache License, Version 2.0 (the +-- "License"); you may not use this file except in compliance +-- with the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, +-- software distributed under the License is distributed on an +-- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +-- KIND, either express or implied. See the License for the +-- specific language governing permissions and limitations +-- under the License. + +-- cloud.last_annotation_view source + + +DROP VIEW IF EXISTS `cloud`.`last_annotation_view`; + +CREATE VIEW `cloud`.`last_annotation_view` AS +select + `annotations`.`uuid` AS `uuid`, + `annotations`.`annotation` AS `annotation`, + `annotations`.`entity_uuid` AS `entity_uuid`, + `annotations`.`entity_type` AS `entity_type`, + `annotations`.`user_uuid` AS `user_uuid`, + `annotations`.`created` AS `created`, + `annotations`.`removed` AS `removed` +from + `annotations` +where + `annotations`.`created` in ( + select + max(`annotations`.`created`) + from + `annotations` + where + (`annotations`.`removed` is null) + group by + `annotations`.`entity_uuid`); diff --git a/engine/schema/src/main/resources/META-INF/db/views/cloud.mshost_view.sql b/engine/schema/src/main/resources/META-INF/db/views/cloud.mshost_view.sql new file mode 100644 index 000000000000..9b68f170e38e --- /dev/null +++ b/engine/schema/src/main/resources/META-INF/db/views/cloud.mshost_view.sql @@ -0,0 +1,46 @@ +-- Licensed to the Apache Software Foundation (ASF) under one +-- or more contributor license agreements. See the NOTICE file +-- distributed with this work for additional information +-- regarding copyright ownership. The ASF licenses this file +-- to you under the Apache License, Version 2.0 (the +-- "License"); you may not use this file except in compliance +-- with the License. 
You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, +-- software distributed under the License is distributed on an +-- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +-- KIND, either express or implied. See the License for the +-- specific language governing permissions and limitations +-- under the License. + +-- cloud.mshost_view source + + +DROP VIEW IF EXISTS `cloud`.`mshost_view`; + +CREATE VIEW `cloud`.`mshost_view` AS +select + `mshost`.`id` AS `id`, + `mshost`.`msid` AS `msid`, + `mshost`.`runid` AS `runid`, + `mshost`.`name` AS `name`, + `mshost`.`uuid` AS `uuid`, + `mshost`.`state` AS `state`, + `mshost`.`version` AS `version`, + `mshost`.`service_ip` AS `service_ip`, + `mshost`.`service_port` AS `service_port`, + `mshost`.`last_update` AS `last_update`, + `mshost`.`removed` AS `removed`, + `mshost`.`alert_count` AS `alert_count`, + `mshost_status`.`last_jvm_start` AS `last_jvm_start`, + `mshost_status`.`last_jvm_stop` AS `last_jvm_stop`, + `mshost_status`.`last_system_boot` AS `last_system_boot`, + `mshost_status`.`os_distribution` AS `os_distribution`, + `mshost_status`.`java_name` AS `java_name`, + `mshost_status`.`java_version` AS `java_version` +from + (`mshost` +left join `mshost_status` on + ((`mshost`.`uuid` = `mshost_status`.`ms_id`))); diff --git a/engine/schema/src/main/resources/META-INF/db/views/cloud.project_account_view.sql b/engine/schema/src/main/resources/META-INF/db/views/cloud.project_account_view.sql new file mode 100644 index 000000000000..c89618970cd2 --- /dev/null +++ b/engine/schema/src/main/resources/META-INF/db/views/cloud.project_account_view.sql @@ -0,0 +1,54 @@ +-- Licensed to the Apache Software Foundation (ASF) under one +-- or more contributor license agreements. See the NOTICE file +-- distributed with this work for additional information +-- regarding copyright ownership. The ASF licenses this file +-- to you under the Apache License, Version 2.0 (the +-- "License"); you may not use this file except in compliance +-- with the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, +-- software distributed under the License is distributed on an +-- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +-- KIND, either express or implied. See the License for the +-- specific language governing permissions and limitations +-- under the License. 
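+
+-- Usage note: each row of the view below is one membership entry from
+-- `project_account`, enriched with the account, the optional user and the
+-- project role. An illustrative member listing (the project UUID is a
+-- made-up value):
+--
+--   SELECT `account_name`, `user_name`, `account_role`, `project_role_uuid`
+--   FROM `cloud`.`project_account_view`
+--   WHERE `project_uuid` = '11111111-2222-3333-4444-555555555555';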
+ +-- cloud.project_account_view source + + +DROP VIEW IF EXISTS `cloud`.`project_account_view`; + +CREATE VIEW `cloud`.`project_account_view` AS +select + `project_account`.`id` AS `id`, + `account`.`id` AS `account_id`, + `account`.`uuid` AS `account_uuid`, + `account`.`account_name` AS `account_name`, + `account`.`type` AS `account_type`, + `user`.`id` AS `user_id`, + `user`.`uuid` AS `user_uuid`, + `user`.`username` AS `user_name`, + `project_account`.`account_role` AS `account_role`, + `project_role`.`id` AS `project_role_id`, + `project_role`.`uuid` AS `project_role_uuid`, + `projects`.`id` AS `project_id`, + `projects`.`uuid` AS `project_uuid`, + `projects`.`name` AS `project_name`, + `domain`.`id` AS `domain_id`, + `domain`.`uuid` AS `domain_uuid`, + `domain`.`name` AS `domain_name`, + `domain`.`path` AS `domain_path` +from + (((((`project_account` +join `account` on + ((`project_account`.`account_id` = `account`.`id`))) +join `domain` on + ((`account`.`domain_id` = `domain`.`id`))) +join `projects` on + ((`projects`.`id` = `project_account`.`project_id`))) +left join `project_role` on + ((`project_account`.`project_role_id` = `project_role`.`id`))) +left join `user` on + ((`project_account`.`user_id` = `user`.`id`))); diff --git a/engine/schema/src/main/resources/META-INF/db/views/cloud.project_invitation_view.sql b/engine/schema/src/main/resources/META-INF/db/views/cloud.project_invitation_view.sql new file mode 100644 index 000000000000..fae35b9373e6 --- /dev/null +++ b/engine/schema/src/main/resources/META-INF/db/views/cloud.project_invitation_view.sql @@ -0,0 +1,52 @@ +-- Licensed to the Apache Software Foundation (ASF) under one +-- or more contributor license agreements. See the NOTICE file +-- distributed with this work for additional information +-- regarding copyright ownership. The ASF licenses this file +-- to you under the Apache License, Version 2.0 (the +-- "License"); you may not use this file except in compliance +-- with the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, +-- software distributed under the License is distributed on an +-- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +-- KIND, either express or implied. See the License for the +-- specific language governing permissions and limitations +-- under the License. 
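+
+-- Usage note: the view below resolves invitations to their project, the
+-- invited account/user (both LEFT JOINed, since an invitation may carry only
+-- an email address) and the domain. An illustrative query for invitations
+-- that are still open (assuming 'Pending' as the stored state value):
+--
+--   SELECT `email`, `account_name`, `project_name`, `created`
+--   FROM `cloud`.`project_invitation_view`
+--   WHERE `state` = 'Pending';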
+ +-- cloud.project_invitation_view source + + +DROP VIEW IF EXISTS `cloud`.`project_invitation_view`; + +CREATE VIEW `cloud`.`project_invitation_view` AS +select + `project_invitations`.`id` AS `id`, + `project_invitations`.`uuid` AS `uuid`, + `project_invitations`.`email` AS `email`, + `project_invitations`.`created` AS `created`, + `project_invitations`.`state` AS `state`, + `project_invitations`.`project_role_id` AS `project_role_id`, + `projects`.`id` AS `project_id`, + `projects`.`uuid` AS `project_uuid`, + `projects`.`name` AS `project_name`, + `account`.`id` AS `account_id`, + `account`.`uuid` AS `account_uuid`, + `account`.`account_name` AS `account_name`, + `account`.`type` AS `account_type`, + `user`.`id` AS `user_id`, + `domain`.`id` AS `domain_id`, + `domain`.`uuid` AS `domain_uuid`, + `domain`.`name` AS `domain_name`, + `domain`.`path` AS `domain_path` +from + ((((`project_invitations` +left join `account` on + ((`project_invitations`.`account_id` = `account`.`id`))) +left join `domain` on + ((`project_invitations`.`domain_id` = `domain`.`id`))) +left join `projects` on + ((`projects`.`id` = `project_invitations`.`project_id`))) +left join `user` on + ((`project_invitations`.`user_id` = `user`.`id`))); diff --git a/engine/schema/src/main/resources/META-INF/db/views/cloud.project_view.sql b/engine/schema/src/main/resources/META-INF/db/views/cloud.project_view.sql new file mode 100644 index 000000000000..31461b1dd1ea --- /dev/null +++ b/engine/schema/src/main/resources/META-INF/db/views/cloud.project_view.sql @@ -0,0 +1,50 @@ +-- Licensed to the Apache Software Foundation (ASF) under one +-- or more contributor license agreements. See the NOTICE file +-- distributed with this work for additional information +-- regarding copyright ownership. The ASF licenses this file +-- to you under the Apache License, Version 2.0 (the +-- "License"); you may not use this file except in compliance +-- with the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, +-- software distributed under the License is distributed on an +-- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +-- KIND, either express or implied. See the License for the +-- specific language governing permissions and limitations +-- under the License. 
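+
+-- Usage note: the view below joins `project_account` twice: the inner join
+-- restricted to `account_role` = 'Admin' supplies the `owner` column, while
+-- the trailing LEFT JOIN (`pacct`) yields one row per project member.
+-- Callers that want one row per project should therefore de-duplicate, e.g.:
+--
+--   SELECT DISTINCT `id`, `name`, `owner`, `domain_name`
+--   FROM `cloud`.`project_view`
+--   WHERE `removed` IS NULL;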
+ +-- cloud.project_view source + + +DROP VIEW IF EXISTS `cloud`.`project_view`; + +CREATE VIEW `cloud`.`project_view` AS +select + `projects`.`id` AS `id`, + `projects`.`uuid` AS `uuid`, + `projects`.`name` AS `name`, + `projects`.`display_text` AS `display_text`, + `projects`.`state` AS `state`, + `projects`.`removed` AS `removed`, + `projects`.`created` AS `created`, + `projects`.`project_account_id` AS `project_account_id`, + `account`.`account_name` AS `owner`, + `pacct`.`account_id` AS `account_id`, + `pacct`.`user_id` AS `user_id`, + `domain`.`id` AS `domain_id`, + `domain`.`uuid` AS `domain_uuid`, + `domain`.`name` AS `domain_name`, + `domain`.`path` AS `domain_path` +from + ((((`projects` +join `domain` on + ((`projects`.`domain_id` = `domain`.`id`))) +join `project_account` on + (((`projects`.`id` = `project_account`.`project_id`) + and (`project_account`.`account_role` = 'Admin')))) +join `account` on + ((`account`.`id` = `project_account`.`account_id`))) +left join `project_account` `pacct` on + ((`projects`.`id` = `pacct`.`project_id`))); diff --git a/engine/schema/src/main/resources/META-INF/db/views/cloud.resource_tag_view.sql b/engine/schema/src/main/resources/META-INF/db/views/cloud.resource_tag_view.sql new file mode 100644 index 000000000000..3d77d49f8701 --- /dev/null +++ b/engine/schema/src/main/resources/META-INF/db/views/cloud.resource_tag_view.sql @@ -0,0 +1,51 @@ +-- Licensed to the Apache Software Foundation (ASF) under one +-- or more contributor license agreements. See the NOTICE file +-- distributed with this work for additional information +-- regarding copyright ownership. The ASF licenses this file +-- to you under the Apache License, Version 2.0 (the +-- "License"); you may not use this file except in compliance +-- with the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, +-- software distributed under the License is distributed on an +-- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +-- KIND, either express or implied. See the License for the +-- specific language governing permissions and limitations +-- under the License. 
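+
+-- Usage note: the view below emits one row per tag, already joined to the
+-- tag's account, domain and optional project. An illustrative lookup by
+-- resource type and key (both literal values are examples only):
+--
+--   SELECT `resource_uuid`, `key`, `value`, `account_name`
+--   FROM `cloud`.`resource_tag_view`
+--   WHERE `resource_type` = 'UserVm' AND `key` = 'environment';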
+ +-- cloud.resource_tag_view source + + +DROP VIEW IF EXISTS `cloud`.`resource_tag_view`; + +CREATE VIEW `cloud`.`resource_tag_view` AS +select + `resource_tags`.`id` AS `id`, + `resource_tags`.`uuid` AS `uuid`, + `resource_tags`.`key` AS `key`, + `resource_tags`.`value` AS `value`, + `resource_tags`.`resource_id` AS `resource_id`, + `resource_tags`.`resource_uuid` AS `resource_uuid`, + `resource_tags`.`resource_type` AS `resource_type`, + `resource_tags`.`customer` AS `customer`, + `account`.`id` AS `account_id`, + `account`.`uuid` AS `account_uuid`, + `account`.`account_name` AS `account_name`, + `account`.`type` AS `account_type`, + `domain`.`id` AS `domain_id`, + `domain`.`uuid` AS `domain_uuid`, + `domain`.`name` AS `domain_name`, + `domain`.`path` AS `domain_path`, + `projects`.`id` AS `project_id`, + `projects`.`uuid` AS `project_uuid`, + `projects`.`name` AS `project_name` +from + (((`resource_tags` +join `account` on + ((`resource_tags`.`account_id` = `account`.`id`))) +join `domain` on + ((`resource_tags`.`domain_id` = `domain`.`id`))) +left join `projects` on + ((`projects`.`project_account_id` = `resource_tags`.`account_id`))); diff --git a/engine/schema/src/main/resources/META-INF/db/views/cloud.security_group_view.sql b/engine/schema/src/main/resources/META-INF/db/views/cloud.security_group_view.sql new file mode 100644 index 000000000000..3cae860c1c39 --- /dev/null +++ b/engine/schema/src/main/resources/META-INF/db/views/cloud.security_group_view.sql @@ -0,0 +1,79 @@ +-- Licensed to the Apache Software Foundation (ASF) under one +-- or more contributor license agreements. See the NOTICE file +-- distributed with this work for additional information +-- regarding copyright ownership. The ASF licenses this file +-- to you under the Apache License, Version 2.0 (the +-- "License"); you may not use this file except in compliance +-- with the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, +-- software distributed under the License is distributed on an +-- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +-- KIND, either express or implied. See the License for the +-- specific language governing permissions and limitations +-- under the License. 
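+
+-- Usage note: the view below LEFT JOINs rules, resource tags and pending
+-- async jobs onto each security group, so a group with several rules and
+-- tags produces one row per rule/tag combination; consumers such as the list
+-- API are expected to de-duplicate. An illustrative query ('web-sg' is a
+-- hypothetical group name):
+--
+--   SELECT DISTINCT `name`, `rule_type`, `rule_protocol`, `rule_start_port`, `rule_end_port`
+--   FROM `cloud`.`security_group_view`
+--   WHERE `name` = 'web-sg';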
+ +-- cloud.security_group_view source + + +DROP VIEW IF EXISTS `cloud`.`security_group_view`; + +CREATE VIEW `cloud`.`security_group_view` AS +select + `security_group`.`id` AS `id`, + `security_group`.`name` AS `name`, + `security_group`.`description` AS `description`, + `security_group`.`uuid` AS `uuid`, + `account`.`id` AS `account_id`, + `account`.`uuid` AS `account_uuid`, + `account`.`account_name` AS `account_name`, + `account`.`type` AS `account_type`, + `domain`.`id` AS `domain_id`, + `domain`.`uuid` AS `domain_uuid`, + `domain`.`name` AS `domain_name`, + `domain`.`path` AS `domain_path`, + `projects`.`id` AS `project_id`, + `projects`.`uuid` AS `project_uuid`, + `projects`.`name` AS `project_name`, + `security_group_rule`.`id` AS `rule_id`, + `security_group_rule`.`uuid` AS `rule_uuid`, + `security_group_rule`.`type` AS `rule_type`, + `security_group_rule`.`start_port` AS `rule_start_port`, + `security_group_rule`.`end_port` AS `rule_end_port`, + `security_group_rule`.`protocol` AS `rule_protocol`, + `security_group_rule`.`allowed_network_id` AS `rule_allowed_network_id`, + `security_group_rule`.`allowed_ip_cidr` AS `rule_allowed_ip_cidr`, + `security_group_rule`.`create_status` AS `rule_create_status`, + `resource_tags`.`id` AS `tag_id`, + `resource_tags`.`uuid` AS `tag_uuid`, + `resource_tags`.`key` AS `tag_key`, + `resource_tags`.`value` AS `tag_value`, + `resource_tags`.`domain_id` AS `tag_domain_id`, + `resource_tags`.`account_id` AS `tag_account_id`, + `resource_tags`.`resource_id` AS `tag_resource_id`, + `resource_tags`.`resource_uuid` AS `tag_resource_uuid`, + `resource_tags`.`resource_type` AS `tag_resource_type`, + `resource_tags`.`customer` AS `tag_customer`, + `async_job`.`id` AS `job_id`, + `async_job`.`uuid` AS `job_uuid`, + `async_job`.`job_status` AS `job_status`, + `async_job`.`account_id` AS `job_account_id` +from + ((((((`security_group` +left join `security_group_rule` on + ((`security_group`.`id` = `security_group_rule`.`security_group_id`))) +join `account` on + ((`security_group`.`account_id` = `account`.`id`))) +join `domain` on + ((`security_group`.`domain_id` = `domain`.`id`))) +left join `projects` on + ((`projects`.`project_account_id` = `security_group`.`account_id`))) +left join `resource_tags` on + (((`resource_tags`.`resource_id` = `security_group`.`id`) + and (`resource_tags`.`resource_type` = 'SecurityGroup')))) +left join `async_job` on + (((`async_job`.`instance_id` = `security_group`.`id`) + and (`async_job`.`instance_type` = 'SecurityGroup') + and (`async_job`.`job_status` = 0)))); diff --git a/engine/schema/src/main/resources/META-INF/db/views/cloud.volume_view.sql b/engine/schema/src/main/resources/META-INF/db/views/cloud.volume_view.sql new file mode 100644 index 000000000000..fd21fff14944 --- /dev/null +++ b/engine/schema/src/main/resources/META-INF/db/views/cloud.volume_view.sql @@ -0,0 +1,156 @@ +-- Licensed to the Apache Software Foundation (ASF) under one +-- or more contributor license agreements. See the NOTICE file +-- distributed with this work for additional information +-- regarding copyright ownership. The ASF licenses this file +-- to you under the Apache License, Version 2.0 (the +-- "License"); you may not use this file except in compliance +-- with the License. 
You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, +-- software distributed under the License is distributed on an +-- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +-- KIND, either express or implied. See the License for the +-- specific language governing permissions and limitations +-- under the License. + +-- VIEW `cloud`.`volume_view`; + +DROP VIEW IF EXISTS `cloud`.`volume_view`; + +CREATE VIEW `cloud`.`volume_view` AS +SELECT + `volumes`.`id` AS `id`, + `volumes`.`uuid` AS `uuid`, + `volumes`.`name` AS `name`, + `volumes`.`device_id` AS `device_id`, + `volumes`.`volume_type` AS `volume_type`, + `volumes`.`provisioning_type` AS `provisioning_type`, + `volumes`.`size` AS `size`, + `volumes`.`min_iops` AS `min_iops`, + `volumes`.`max_iops` AS `max_iops`, + `volumes`.`created` AS `created`, + `volumes`.`state` AS `state`, + `volumes`.`attached` AS `attached`, + `volumes`.`removed` AS `removed`, + `volumes`.`display_volume` AS `display_volume`, + `volumes`.`format` AS `format`, + `volumes`.`path` AS `path`, + `volumes`.`chain_info` AS `chain_info`, + `volumes`.`external_uuid` AS `external_uuid`, + `account`.`id` AS `account_id`, + `account`.`uuid` AS `account_uuid`, + `account`.`account_name` AS `account_name`, + `account`.`type` AS `account_type`, + `domain`.`id` AS `domain_id`, + `domain`.`uuid` AS `domain_uuid`, + `domain`.`name` AS `domain_name`, + `domain`.`path` AS `domain_path`, + `projects`.`id` AS `project_id`, + `projects`.`uuid` AS `project_uuid`, + `projects`.`name` AS `project_name`, + `data_center`.`id` AS `data_center_id`, + `data_center`.`uuid` AS `data_center_uuid`, + `data_center`.`name` AS `data_center_name`, + `data_center`.`networktype` AS `data_center_type`, + `vm_instance`.`id` AS `vm_id`, + `vm_instance`.`uuid` AS `vm_uuid`, + `vm_instance`.`name` AS `vm_name`, + `vm_instance`.`state` AS `vm_state`, + `vm_instance`.`vm_type` AS `vm_type`, + `user_vm`.`display_name` AS `vm_display_name`, + `volume_store_ref`.`size` AS `volume_store_size`, + `volume_store_ref`.`download_pct` AS `download_pct`, + `volume_store_ref`.`download_state` AS `download_state`, + `volume_store_ref`.`error_str` AS `error_str`, + `volume_store_ref`.`created` AS `created_on_store`, + `disk_offering`.`id` AS `disk_offering_id`, + `disk_offering`.`uuid` AS `disk_offering_uuid`, + `disk_offering`.`name` AS `disk_offering_name`, + `disk_offering`.`display_text` AS `disk_offering_display_text`, + `disk_offering`.`use_local_storage` AS `use_local_storage`, + `service_offering`.`system_use` AS `system_use`, + `disk_offering`.`bytes_read_rate` AS `bytes_read_rate`, + `disk_offering`.`bytes_write_rate` AS `bytes_write_rate`, + `disk_offering`.`iops_read_rate` AS `iops_read_rate`, + `disk_offering`.`iops_write_rate` AS `iops_write_rate`, + `disk_offering`.`cache_mode` AS `cache_mode`, + `storage_pool`.`id` AS `pool_id`, + `storage_pool`.`uuid` AS `pool_uuid`, + `storage_pool`.`name` AS `pool_name`, + `cluster`.`id` AS `cluster_id`, + `cluster`.`name` AS `cluster_name`, + `cluster`.`uuid` AS `cluster_uuid`, + `cluster`.`hypervisor_type` AS `hypervisor_type`, + `vm_template`.`id` AS `template_id`, + `vm_template`.`uuid` AS `template_uuid`, + `vm_template`.`extractable` AS `extractable`, + `vm_template`.`type` AS `template_type`, + `vm_template`.`name` AS `template_name`, + `vm_template`.`display_text` AS `template_display_text`, + `iso`.`id` AS `iso_id`, + `iso`.`uuid` AS `iso_uuid`, + 
`iso`.`name` AS `iso_name`, + `iso`.`display_text` AS `iso_display_text`, + `resource_tags`.`id` AS `tag_id`, + `resource_tags`.`uuid` AS `tag_uuid`, + `resource_tags`.`key` AS `tag_key`, + `resource_tags`.`value` AS `tag_value`, + `resource_tags`.`domain_id` AS `tag_domain_id`, + `resource_tags`.`account_id` AS `tag_account_id`, + `resource_tags`.`resource_id` AS `tag_resource_id`, + `resource_tags`.`resource_uuid` AS `tag_resource_uuid`, + `resource_tags`.`resource_type` AS `tag_resource_type`, + `resource_tags`.`customer` AS `tag_customer`, + `async_job`.`id` AS `job_id`, + `async_job`.`uuid` AS `job_uuid`, + `async_job`.`job_status` AS `job_status`, + `async_job`.`account_id` AS `job_account_id`, + `host_pod_ref`.`id` AS `pod_id`, + `host_pod_ref`.`uuid` AS `pod_uuid`, + `host_pod_ref`.`name` AS `pod_name`, + `resource_tag_account`.`account_name` AS `tag_account_name`, + `resource_tag_domain`.`uuid` AS `tag_domain_uuid`, + `resource_tag_domain`.`name` AS `tag_domain_name` +FROM + ((((((((((((((((((`volumes` +JOIN `account` ON + ((`volumes`.`account_id` = `account`.`id`))) +JOIN `domain` ON + ((`volumes`.`domain_id` = `domain`.`id`))) +LEFT JOIN `projects` ON + ((`projects`.`project_account_id` = `account`.`id`))) +LEFT JOIN `data_center` ON + ((`volumes`.`data_center_id` = `data_center`.`id`))) +LEFT JOIN `vm_instance` ON + ((`volumes`.`instance_id` = `vm_instance`.`id`))) +LEFT JOIN `user_vm` ON + ((`user_vm`.`id` = `vm_instance`.`id`))) +LEFT JOIN `volume_store_ref` ON + ((`volumes`.`id` = `volume_store_ref`.`volume_id`))) +LEFT JOIN `service_offering` ON + ((`vm_instance`.`service_offering_id` = `service_offering`.`id`))) +LEFT JOIN `disk_offering` ON + ((`volumes`.`disk_offering_id` = `disk_offering`.`id`))) +LEFT JOIN `storage_pool` ON + ((`volumes`.`pool_id` = `storage_pool`.`id`))) +LEFT JOIN `host_pod_ref` ON + ((`storage_pool`.`pod_id` = `host_pod_ref`.`id`))) +LEFT JOIN `cluster` ON + ((`storage_pool`.`cluster_id` = `cluster`.`id`))) +LEFT JOIN `vm_template` ON + ((`volumes`.`template_id` = `vm_template`.`id`))) +LEFT JOIN `vm_template` `iso` ON + ((`iso`.`id` = `volumes`.`iso_id`))) +LEFT JOIN `resource_tags` ON + (((`resource_tags`.`resource_id` = `volumes`.`id`) + and (`resource_tags`.`resource_type` = 'Volume')))) +LEFT JOIN `async_job` ON + (((`async_job`.`instance_id` = `volumes`.`id`) + and (`async_job`.`instance_type` = 'Volume') + and (`async_job`.`job_status` = 0)))) +LEFT JOIN `account` `resource_tag_account` ON + ((`resource_tag_account`.`id` = `resource_tags`.`account_id`))) +LEFT JOIN `domain` `resource_tag_domain` ON + ((`resource_tag_domain`.`id` = `resource_tags`.`domain_id`))); diff --git a/engine/schema/src/main/resources/META-INF/db/views/cloud.vpc_offering_view.sql b/engine/schema/src/main/resources/META-INF/db/views/cloud.vpc_offering_view.sql new file mode 100644 index 000000000000..cb762a578839 --- /dev/null +++ b/engine/schema/src/main/resources/META-INF/db/views/cloud.vpc_offering_view.sql @@ -0,0 +1,63 @@ +-- Licensed to the Apache Software Foundation (ASF) under one +-- or more contributor license agreements. See the NOTICE file +-- distributed with this work for additional information +-- regarding copyright ownership. The ASF licenses this file +-- to you under the Apache License, Version 2.0 (the +-- "License"); you may not use this file except in compliance +-- with the License.
You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, +-- software distributed under the License is distributed on an +-- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +-- KIND, either express or implied. See the License for the +-- specific language governing permissions and limitations +-- under the License. + +-- cloud.vpc_offering_view source + + +DROP VIEW IF EXISTS `cloud`.`vpc_offering_view`; + +CREATE VIEW `cloud`.`vpc_offering_view` AS +select + `vpc_offerings`.`id` AS `id`, + `vpc_offerings`.`uuid` AS `uuid`, + `vpc_offerings`.`name` AS `name`, + `vpc_offerings`.`unique_name` AS `unique_name`, + `vpc_offerings`.`display_text` AS `display_text`, + `vpc_offerings`.`state` AS `state`, + `vpc_offerings`.`default` AS `default`, + `vpc_offerings`.`created` AS `created`, + `vpc_offerings`.`removed` AS `removed`, + `vpc_offerings`.`service_offering_id` AS `service_offering_id`, + `vpc_offerings`.`supports_distributed_router` AS `supports_distributed_router`, + `vpc_offerings`.`supports_region_level_vpc` AS `supports_region_level_vpc`, + `vpc_offerings`.`redundant_router_service` AS `redundant_router_service`, + `vpc_offerings`.`sort_key` AS `sort_key`, + group_concat(distinct `domain`.`id` separator ',') AS `domain_id`, + group_concat(distinct `domain`.`uuid` separator ',') AS `domain_uuid`, + group_concat(distinct `domain`.`name` separator ',') AS `domain_name`, + group_concat(distinct `domain`.`path` separator ',') AS `domain_path`, + group_concat(distinct `zone`.`id` separator ',') AS `zone_id`, + group_concat(distinct `zone`.`uuid` separator ',') AS `zone_uuid`, + group_concat(distinct `zone`.`name` separator ',') AS `zone_name`, + `offering_details`.`value` AS `internet_protocol` +from + (((((`vpc_offerings` +left join `vpc_offering_details` `domain_details` on + (((`domain_details`.`offering_id` = `vpc_offerings`.`id`) + and (`domain_details`.`name` = 'domainid')))) +left join `domain` on + ((0 <> find_in_set(`domain`.`id`, `domain_details`.`value`)))) +left join `vpc_offering_details` `zone_details` on + (((`zone_details`.`offering_id` = `vpc_offerings`.`id`) + and (`zone_details`.`name` = 'zoneid')))) +left join `data_center` `zone` on + ((0 <> find_in_set(`zone`.`id`, `zone_details`.`value`)))) +left join `vpc_offering_details` `offering_details` on + (((`offering_details`.`offering_id` = `vpc_offerings`.`id`) + and (`offering_details`.`name` = 'internetprotocol')))) +group by + `vpc_offerings`.`id`; diff --git a/engine/schema/src/test/java/com/cloud/vm/dao/VMInstanceDaoImplTest.java b/engine/schema/src/test/java/com/cloud/vm/dao/VMInstanceDaoImplTest.java index 00eae3b78d4c..4a32dc083590 100644 --- a/engine/schema/src/test/java/com/cloud/vm/dao/VMInstanceDaoImplTest.java +++ b/engine/schema/src/test/java/com/cloud/vm/dao/VMInstanceDaoImplTest.java @@ -17,23 +17,33 @@ package com.cloud.vm.dao; -import com.cloud.utils.Pair; -import com.cloud.vm.VirtualMachine; +import static com.cloud.vm.VirtualMachine.State.Running; +import static com.cloud.vm.VirtualMachine.State.Stopped; +import static com.cloud.vm.dao.VMInstanceDaoImpl.MAX_CONSECUTIVE_SAME_STATE_UPDATE_COUNT; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyLong; +import static org.mockito.Mockito.doReturn; +import static org.mockito.Mockito.never; +import static 
org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +import java.util.Date; + import org.joda.time.DateTime; import org.junit.After; import org.junit.Before; import org.junit.Test; -import org.junit.Assert; import org.mockito.Mock; - -import static com.cloud.vm.VirtualMachine.State.Running; -import static com.cloud.vm.VirtualMachine.State.Stopped; - -import static org.mockito.Mockito.when; -import com.cloud.vm.VMInstanceVO; import org.mockito.MockitoAnnotations; import org.mockito.Spy; +import com.cloud.utils.Pair; +import com.cloud.vm.VMInstanceVO; +import com.cloud.vm.VirtualMachine; + /** * Created by sudharma_jain on 3/2/17. */ @@ -63,16 +73,130 @@ public void tearDown() throws Exception { } @Test - public void testUpdateState() throws Exception { + public void testUpdateState() { Long destHostId = null; - Pair<Long, Long> opaqueMock = new Pair<Long, Long>(new Long(1), destHostId); + Pair<Long, Long> opaqueMock = new Pair<>(1L, destHostId); vmInstanceDao.updateState(Stopped, VirtualMachine.Event.FollowAgentPowerOffReport, Stopped, vm , opaqueMock); } @Test - public void testIfStateAndHostUnchanged() throws Exception { - Assert.assertEquals(vmInstanceDao.ifStateUnchanged(Stopped, Stopped, null, null), true); - Assert.assertEquals(vmInstanceDao.ifStateUnchanged(Stopped, Running, null, null), false); + public void testIfStateAndHostUnchanged() { + assertTrue(vmInstanceDao.ifStateUnchanged(Stopped, Stopped, null, null)); + assertFalse(vmInstanceDao.ifStateUnchanged(Stopped, Running, null, null)); + } + + @Test + public void testUpdatePowerStateDifferentPowerState() { + when(vm.getPowerStateUpdateTime()).thenReturn(null); + when(vm.getPowerHostId()).thenReturn(1L); + when(vm.getPowerState()).thenReturn(VirtualMachine.PowerState.PowerOn); + doReturn(vm).when(vmInstanceDao).findById(anyLong()); + doReturn(true).when(vmInstanceDao).update(anyLong(), any()); + + boolean result = vmInstanceDao.updatePowerState(1L, 1L, VirtualMachine.PowerState.PowerOff, new Date()); + + verify(vm, times(1)).setPowerState(VirtualMachine.PowerState.PowerOff); + verify(vm, times(1)).setPowerHostId(1L); + verify(vm, times(1)).setPowerStateUpdateCount(1); + verify(vm, times(1)).setPowerStateUpdateTime(any(Date.class)); + + assertTrue(result); + } + + @Test + public void testUpdatePowerStateVmNotFound() { + when(vm.getPowerStateUpdateTime()).thenReturn(null); + when(vm.getPowerHostId()).thenReturn(1L); + when(vm.getPowerState()).thenReturn(VirtualMachine.PowerState.PowerOn); + doReturn(null).when(vmInstanceDao).findById(anyLong()); + + boolean result = vmInstanceDao.updatePowerState(1L, 1L, VirtualMachine.PowerState.PowerOff, new Date()); + + verify(vm, never()).setPowerState(any()); + verify(vm, never()).setPowerHostId(anyLong()); + verify(vm, never()).setPowerStateUpdateCount(any(Integer.class)); + verify(vm, never()).setPowerStateUpdateTime(any(Date.class)); + + assertFalse(result); + } + + @Test + public void testUpdatePowerStateNoChangeFirstUpdate() { + when(vm.getPowerStateUpdateTime()).thenReturn(null); + when(vm.getPowerHostId()).thenReturn(1L); + when(vm.getPowerState()).thenReturn(VirtualMachine.PowerState.PowerOn); + when(vm.getState()).thenReturn(Running); + when(vm.getPowerStateUpdateCount()).thenReturn(1); + doReturn(vm).when(vmInstanceDao).findById(anyLong()); + doReturn(true).when(vmInstanceDao).update(anyLong(), any()); + + boolean result = vmInstanceDao.updatePowerState(1L, 1L, VirtualMachine.PowerState.PowerOn, new Date()); + + verify(vm, never()).setPowerState(any()); + 
verify(vm, never()).setPowerHostId(anyLong()); + verify(vm, times(1)).setPowerStateUpdateCount(2); + verify(vm, times(1)).setPowerStateUpdateTime(any(Date.class)); + + assertTrue(result); + } + + @Test + public void testUpdatePowerStateNoChangeMaxUpdatesValidState() { + when(vm.getPowerStateUpdateTime()).thenReturn(null); + when(vm.getPowerHostId()).thenReturn(1L); + when(vm.getPowerState()).thenReturn(VirtualMachine.PowerState.PowerOn); + when(vm.getPowerStateUpdateCount()).thenReturn(MAX_CONSECUTIVE_SAME_STATE_UPDATE_COUNT); + when(vm.getState()).thenReturn(Running); + doReturn(vm).when(vmInstanceDao).findById(anyLong()); + doReturn(true).when(vmInstanceDao).update(anyLong(), any()); + + boolean result = vmInstanceDao.updatePowerState(1L, 1L, VirtualMachine.PowerState.PowerOn, new Date()); + + verify(vm, never()).setPowerState(any()); + verify(vm, never()).setPowerHostId(anyLong()); + verify(vm, never()).setPowerStateUpdateCount(any(Integer.class)); + verify(vm, never()).setPowerStateUpdateTime(any(Date.class)); + + assertFalse(result); } + @Test + public void testUpdatePowerStateNoChangeMaxUpdatesInvalidStateVmStopped() { + when(vm.getPowerStateUpdateTime()).thenReturn(null); + when(vm.getPowerHostId()).thenReturn(1L); + when(vm.getPowerState()).thenReturn(VirtualMachine.PowerState.PowerOn); + when(vm.getPowerStateUpdateCount()).thenReturn(MAX_CONSECUTIVE_SAME_STATE_UPDATE_COUNT); + when(vm.getState()).thenReturn(Stopped); + doReturn(vm).when(vmInstanceDao).findById(anyLong()); + doReturn(true).when(vmInstanceDao).update(anyLong(), any()); + + boolean result = vmInstanceDao.updatePowerState(1L, 1L, VirtualMachine.PowerState.PowerOn, new Date()); + + verify(vm, times(1)).setPowerState(any()); + verify(vm, times(1)).setPowerHostId(anyLong()); + verify(vm, times(1)).setPowerStateUpdateCount(1); + verify(vm, times(1)).setPowerStateUpdateTime(any(Date.class)); + + assertTrue(result); + } + + @Test + public void testUpdatePowerStateNoChangeMaxUpdatesInvalidStateVmRunning() { + when(vm.getPowerStateUpdateTime()).thenReturn(null); + when(vm.getPowerHostId()).thenReturn(1L); + when(vm.getPowerState()).thenReturn(VirtualMachine.PowerState.PowerOff); + when(vm.getPowerStateUpdateCount()).thenReturn(MAX_CONSECUTIVE_SAME_STATE_UPDATE_COUNT); + when(vm.getState()).thenReturn(Running); + doReturn(vm).when(vmInstanceDao).findById(anyLong()); + doReturn(true).when(vmInstanceDao).update(anyLong(), any()); + + boolean result = vmInstanceDao.updatePowerState(1L, 1L, VirtualMachine.PowerState.PowerOff, new Date()); + + verify(vm, times(1)).setPowerState(any()); + verify(vm, times(1)).setPowerHostId(anyLong()); + verify(vm, times(1)).setPowerStateUpdateCount(1); + verify(vm, times(1)).setPowerStateUpdateTime(any(Date.class)); + + assertTrue(result); + } } diff --git a/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/volume/VolumeServiceImpl.java b/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/volume/VolumeServiceImpl.java index 6a6305987664..49f7abccda5b 100644 --- a/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/volume/VolumeServiceImpl.java +++ b/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/volume/VolumeServiceImpl.java @@ -32,8 +32,10 @@ import javax.inject.Inject; +import com.cloud.storage.VolumeApiServiceImpl; import org.apache.cloudstack.annotation.AnnotationService; import org.apache.cloudstack.annotation.dao.AnnotationDao; +import org.apache.cloudstack.api.command.user.volume.CheckAndRepairVolumeCmd; import 
org.apache.cloudstack.engine.cloud.entity.api.VolumeEntity; import org.apache.cloudstack.engine.orchestration.service.VolumeOrchestrationService; import org.apache.cloudstack.engine.subsystem.api.storage.ChapInfo; @@ -80,6 +82,7 @@ import org.apache.cloudstack.storage.image.store.TemplateObject; import org.apache.cloudstack.storage.to.TemplateObjectTO; import org.apache.cloudstack.storage.to.VolumeObjectTO; +import org.apache.commons.collections.CollectionUtils; import org.apache.commons.lang3.StringUtils; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.LogManager; @@ -88,9 +91,12 @@ import com.cloud.agent.AgentManager; import com.cloud.agent.api.Answer; import com.cloud.agent.api.ModifyTargetsCommand; +import com.cloud.agent.api.storage.CheckAndRepairVolumeAnswer; +import com.cloud.agent.api.storage.CheckAndRepairVolumeCommand; import com.cloud.agent.api.storage.ListVolumeAnswer; import com.cloud.agent.api.storage.ListVolumeCommand; import com.cloud.agent.api.storage.ResizeVolumeCommand; +import com.cloud.agent.api.to.DataObjectType; import com.cloud.agent.api.to.StorageFilerTO; import com.cloud.agent.api.to.VirtualMachineTO; import com.cloud.alert.AlertManager; @@ -111,6 +117,7 @@ import com.cloud.org.Grouping.AllocationState; import com.cloud.resource.ResourceState; import com.cloud.server.ManagementService; +import com.cloud.storage.CheckAndRepairVolumePayload; import com.cloud.storage.DataStoreRole; import com.cloud.storage.RegisterVolumePayload; import com.cloud.storage.ScopeType; @@ -200,7 +207,7 @@ public class VolumeServiceImpl implements VolumeService { @Inject private VolumeOrchestrationService _volumeMgr; @Inject - private StorageManager _storageMgr; + protected StorageManager _storageMgr; @Inject private AnnotationDao annotationDao; @Inject @@ -268,6 +275,19 @@ public void revokeAccess(DataObject dataObject, Host host, DataStore dataStore) } } + @Override + public boolean requiresAccessForMigration(DataObject dataObject, DataStore dataStore) { + DataStoreDriver dataStoreDriver = dataStore != null ? 
dataStore.getDriver() : null; + if (dataStoreDriver == null) { + return false; + } + + if (dataStoreDriver instanceof PrimaryDataStoreDriver) { + return ((PrimaryDataStoreDriver)dataStoreDriver).requiresAccessForMigration(dataObject); + } + return false; + } + @Override public AsyncCallFuture<VolumeApiResult> createVolumeAsync(VolumeInfo volume, DataStore dataStore) { AsyncCallFuture<VolumeApiResult> future = new AsyncCallFuture<VolumeApiResult>(); @@ -2763,6 +2783,62 @@ public SnapshotInfo takeSnapshot(VolumeInfo volume) { return snapshot; } + @Override + public void checkAndRepairVolumeBasedOnConfig(DataObject dataObject, Host host) { + if (HypervisorType.KVM.equals(host.getHypervisorType()) && DataObjectType.VOLUME.equals(dataObject.getType())) { + VolumeInfo volumeInfo = volFactory.getVolume(dataObject.getId()); + if (VolumeApiServiceImpl.AllowCheckAndRepairVolume.valueIn(volumeInfo.getPoolId())) { + logger.info(String.format("Trying to check and repair the volume %d", dataObject.getId())); + String repair = CheckAndRepairVolumeCmd.RepairValues.LEAKS.name().toLowerCase(); + CheckAndRepairVolumePayload payload = new CheckAndRepairVolumePayload(repair); + volumeInfo.addPayload(payload); + checkAndRepairVolumeThroughHost(volumeInfo, host); + } + } + } + + @Override + public Pair<String, String> checkAndRepairVolume(VolumeInfo volume) { + Long poolId = volume.getPoolId(); + List<Long> hostIds = _storageMgr.getUpHostsInPool(poolId); + if (CollectionUtils.isEmpty(hostIds)) { + throw new CloudRuntimeException("Unable to find Up hosts to run the check volume command"); + } + Collections.shuffle(hostIds); + Host host = _hostDao.findById(hostIds.get(0)); + + return checkAndRepairVolumeThroughHost(volume, host); + + } + + private Pair<String, String> checkAndRepairVolumeThroughHost(VolumeInfo volume, Host host) { + Long poolId = volume.getPoolId(); + StoragePool pool = _storageMgr.getStoragePool(poolId); + CheckAndRepairVolumePayload payload = (CheckAndRepairVolumePayload) volume.getpayload(); + CheckAndRepairVolumeCommand command = new CheckAndRepairVolumeCommand(volume.getPath(), new StorageFilerTO(pool), payload.getRepair(), + volume.getPassphrase(), volume.getEncryptFormat()); + + try { + grantAccess(volume, host, volume.getDataStore()); + CheckAndRepairVolumeAnswer answer = (CheckAndRepairVolumeAnswer) _storageMgr.sendToPool(pool, new long[]{host.getId()}, command); + if (answer != null && answer.getResult()) { + logger.debug(String.format("Check volume response result: %s", answer.getDetails())); + return new Pair<>(answer.getVolumeCheckExecutionResult(), answer.getVolumeRepairExecutionResult()); + } else { + String errMsg = (answer == null) ? null : answer.getDetails(); + logger.debug(String.format("Failed to check and repair the volume with error %s", errMsg)); + } + + } catch (Exception e) { + logger.debug("sending check and repair volume command failed", e); + } finally { + revokeAccess(volume, host, volume.getDataStore()); + command.clearPassphrase(); + } + + return null; + } + // For managed storage on Xen and VMware, we need to potentially make space for hypervisor snapshots. // The disk offering can collect this information and pass it on to the volume that's about to be created. // Ex. 
if you want a 10 GB CloudStack volume to reside on managed storage on Xen, this leads to an SR diff --git a/engine/storage/volume/src/test/java/org/apache/cloudstack/storage/volume/VolumeServiceTest.java b/engine/storage/volume/src/test/java/org/apache/cloudstack/storage/volume/VolumeServiceTest.java index f4c6df7dd40a..3a7fcfb6338e 100644 --- a/engine/storage/volume/src/test/java/org/apache/cloudstack/storage/volume/VolumeServiceTest.java +++ b/engine/storage/volume/src/test/java/org/apache/cloudstack/storage/volume/VolumeServiceTest.java @@ -19,15 +19,12 @@ package org.apache.cloudstack.storage.volume; -import com.cloud.storage.Storage; -import com.cloud.storage.VolumeVO; -import com.cloud.storage.dao.VolumeDao; -import com.cloud.storage.snapshot.SnapshotManager; + import java.util.ArrayList; import java.util.Arrays; import java.util.List; import java.util.concurrent.ExecutionException; -import junit.framework.TestCase; + import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine; import org.apache.cloudstack.engine.subsystem.api.storage.VolumeDataFactory; import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo; @@ -42,6 +39,23 @@ import org.mockito.Spy; import org.mockito.junit.MockitoJUnitRunner; +import com.cloud.agent.api.storage.CheckAndRepairVolumeAnswer; +import com.cloud.agent.api.storage.CheckAndRepairVolumeCommand; +import com.cloud.agent.api.to.StorageFilerTO; +import com.cloud.exception.StorageUnavailableException; +import com.cloud.host.HostVO; +import com.cloud.host.dao.HostDao; +import com.cloud.storage.CheckAndRepairVolumePayload; +import com.cloud.storage.Storage; +import com.cloud.storage.StorageManager; +import com.cloud.storage.StoragePool; +import com.cloud.storage.VolumeVO; +import com.cloud.storage.dao.VolumeDao; +import com.cloud.storage.snapshot.SnapshotManager; +import com.cloud.utils.Pair; + +import junit.framework.TestCase; + @RunWith(MockitoJUnitRunner.class) public class VolumeServiceTest extends TestCase{ @@ -66,15 +80,26 @@ public class VolumeServiceTest extends TestCase{ @Mock SnapshotManager snapshotManagerMock; + @Mock + StorageManager storageManagerMock; + @Mock VolumeVO volumeVoMock; + @Mock + HostVO hostMock; + + @Mock + HostDao hostDaoMock; + @Before public void setup(){ volumeServiceImplSpy = Mockito.spy(new VolumeServiceImpl()); volumeServiceImplSpy.volFactory = volumeDataFactoryMock; volumeServiceImplSpy.volDao = volumeDaoMock; volumeServiceImplSpy.snapshotMgr = snapshotManagerMock; + volumeServiceImplSpy._storageMgr = storageManagerMock; + volumeServiceImplSpy._hostDao = hostDaoMock; } @Test(expected = InterruptedException.class) @@ -213,4 +238,75 @@ public void validateDestroySourceVolumeAfterMigrationThrowAnyOtherException() th volumeServiceImplSpy.destroySourceVolumeAfterMigration(ObjectInDataStoreStateMachine.Event.DestroyRequested, null, volumeObject, volumeObject, true); } + + @Test + public void testCheckAndRepairVolume() throws StorageUnavailableException { + VolumeInfo volume = Mockito.mock(VolumeInfo.class); + Mockito.when(volume.getPoolId()).thenReturn(1L); + StoragePool pool = Mockito.mock(StoragePool.class); + Mockito.when(storageManagerMock.getStoragePool(1L)).thenReturn(pool); + List<Long> hostIds = new ArrayList<>(); + hostIds.add(1L); + Mockito.when(storageManagerMock.getUpHostsInPool(1L)).thenReturn(hostIds); + Mockito.when(hostMock.getId()).thenReturn(1L); + Mockito.when(hostDaoMock.findById(1L)).thenReturn(hostMock); + + CheckAndRepairVolumePayload payload = new CheckAndRepairVolumePayload(null); + Mockito.when(volume.getpayload()).thenReturn(payload); + Mockito.when(volume.getPath()).thenReturn("cbac516a-0f1f-4559-921c-1a7c6c408ccf"); + Mockito.when(volume.getPassphrase()).thenReturn(new byte[] {3, 1, 2, 3}); + Mockito.when(volume.getEncryptFormat()).thenReturn("LUKS"); + + String checkResult = "{\n" + " \"image-end-offset\": 6442582016,\n" + " \"total-clusters\": 163840,\n" + " \"check-errors\": 0,\n" + " \"leaks\": 124,\n" + " \"allocated-clusters\": 98154,\n" + " \"filename\": \"/var/lib/libvirt/images/26be20c7-b9d0-43f6-a76e-16c70737a0e0\",\n" + " \"format\": \"qcow2\",\n" + " \"fragmented-clusters\": 96135\n" + "}"; + + CheckAndRepairVolumeCommand command = new CheckAndRepairVolumeCommand(volume.getPath(), new StorageFilerTO(pool), payload.getRepair(), + volume.getPassphrase(), volume.getEncryptFormat()); + + CheckAndRepairVolumeAnswer answer = new CheckAndRepairVolumeAnswer(command, true, checkResult); + answer.setVolumeCheckExecutionResult(checkResult); + Mockito.when(storageManagerMock.sendToPool(pool, new long[]{1L}, command)).thenReturn(answer); + + Pair<String, String> result = volumeServiceImplSpy.checkAndRepairVolume(volume); + + Assert.assertEquals(checkResult, result.first()); + Assert.assertNull(result.second()); + } + + @Test + public void testCheckAndRepairVolumeWhenFailure() throws StorageUnavailableException { + VolumeInfo volume = Mockito.mock(VolumeInfo.class); + Mockito.when(volume.getPoolId()).thenReturn(1L); + StoragePool pool = Mockito.mock(StoragePool.class); + Mockito.when(storageManagerMock.getStoragePool(1L)).thenReturn(pool); + List<Long> hostIds = new ArrayList<>(); + hostIds.add(1L); + Mockito.when(storageManagerMock.getUpHostsInPool(1L)).thenReturn(hostIds); + Mockito.when(hostMock.getId()).thenReturn(1L); + Mockito.when(hostDaoMock.findById(1L)).thenReturn(hostMock); + + CheckAndRepairVolumePayload payload = new CheckAndRepairVolumePayload(null); + Mockito.when(volume.getpayload()).thenReturn(payload); + Mockito.when(volume.getPath()).thenReturn("cbac516a-0f1f-4559-921c-1a7c6c408ccf"); + Mockito.when(volume.getPassphrase()).thenReturn(new byte[] {3, 1, 2, 3}); + Mockito.when(volume.getEncryptFormat()).thenReturn("LUKS"); + + CheckAndRepairVolumeCommand command = new CheckAndRepairVolumeCommand(volume.getPath(), new StorageFilerTO(pool), payload.getRepair(), + volume.getPassphrase(), volume.getEncryptFormat()); + + CheckAndRepairVolumeAnswer answer = new CheckAndRepairVolumeAnswer(command, false, "Unable to execute qemu command"); + Mockito.when(storageManagerMock.sendToPool(pool, new long[]{1L}, command)).thenReturn(answer); + + Pair<String, String> result = volumeServiceImplSpy.checkAndRepairVolume(volume); + + Assert.assertNull(result); + } } diff --git a/framework/quota/src/main/java/org/apache/cloudstack/quota/QuotaAlertManagerImpl.java b/framework/quota/src/main/java/org/apache/cloudstack/quota/QuotaAlertManagerImpl.java index 8d35bb9f4342..ff41a8141d76 100644 --- a/framework/quota/src/main/java/org/apache/cloudstack/quota/QuotaAlertManagerImpl.java +++ b/framework/quota/src/main/java/org/apache/cloudstack/quota/QuotaAlertManagerImpl.java @@ -29,6 +29,7 @@ import javax.inject.Inject; import javax.naming.ConfigurationException; +import com.cloud.utils.DateUtil; import org.apache.cloudstack.framework.config.dao.ConfigurationDao; import org.apache.cloudstack.quota.constant.QuotaConfig; import org.apache.cloudstack.quota.constant.QuotaConfig.QuotaEmailTemplateTypes; @@ -156,9 +157,9 @@ public void 
checkAndSendQuotaAlertEmails() { if (account == null) { continue; // the account is removed } - if (logger.isDebugEnabled()) { - logger.debug("checkAndSendQuotaAlertEmails: Check id=" + account.getId() + " bal=" + accountBalance + ", alertDate=" + alertDate + ", lockable=" + lockable); - } + logger.debug("checkAndSendQuotaAlertEmails: Check id={} bal={}, alertDate={}, lockable={}", account.getId(), + accountBalance, DateUtil.displayDateInTimezone(QuotaManagerImpl.getUsageAggregationTimeZone(), alertDate), + lockable); if (accountBalance.compareTo(zeroBalance) < 0) { if (_lockAccountEnforcement && (lockable == 1)) { if (_quotaManager.isLockable(account)) { diff --git a/framework/quota/src/main/java/org/apache/cloudstack/quota/QuotaManagerImpl.java b/framework/quota/src/main/java/org/apache/cloudstack/quota/QuotaManagerImpl.java index 4293415755a7..9c15a47444ae 100644 --- a/framework/quota/src/main/java/org/apache/cloudstack/quota/QuotaManagerImpl.java +++ b/framework/quota/src/main/java/org/apache/cloudstack/quota/QuotaManagerImpl.java @@ -49,9 +49,9 @@ import org.apache.cloudstack.utils.bytescale.ByteScaleUtils; import org.apache.cloudstack.utils.jsinterpreter.JsInterpreter; import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; -import org.apache.cloudstack.utils.usage.UsageUtils; import org.apache.commons.collections.CollectionUtils; import org.apache.commons.lang3.BooleanUtils; +import org.apache.commons.lang3.ObjectUtils; import org.apache.commons.lang3.StringUtils; import org.apache.commons.lang3.math.NumberUtils; import org.springframework.stereotype.Component; @@ -85,8 +85,7 @@ public class QuotaManagerImpl extends ManagerBase implements QuotaManager { @Inject protected PresetVariableHelper presetVariableHelper; - private TimeZone _usageTimezone; - private int _aggregationDuration = 0; + private static TimeZone usageAggregationTimeZone = TimeZone.getTimeZone("GMT"); static final BigDecimal GiB_DECIMAL = BigDecimal.valueOf(ByteScaleUtils.GiB); List<Account.Type> lockablesAccountTypes = Arrays.asList(Account.Type.NORMAL, Account.Type.DOMAIN_ADMIN); @@ -112,24 +111,16 @@ public boolean configure(String name, Map<String, Object> params) throws ConfigurationException mergeConfigs(configs, params); } - String aggregationRange = configs.get("usage.stats.job.aggregation.range"); - String timeZoneStr = configs.get("usage.aggregation.timezone"); - - if (timeZoneStr == null) { - timeZoneStr = "GMT"; - } - _usageTimezone = TimeZone.getTimeZone(timeZoneStr); - - _aggregationDuration = Integer.parseInt(aggregationRange); - if (_aggregationDuration < UsageUtils.USAGE_AGGREGATION_RANGE_MIN) { - logger.warn("Usage stats job aggregation range is to small, using the minimum value of " + UsageUtils.USAGE_AGGREGATION_RANGE_MIN); - _aggregationDuration = UsageUtils.USAGE_AGGREGATION_RANGE_MIN; - } - logger.info("Usage timezone = " + _usageTimezone + " AggregationDuration=" + _aggregationDuration); + String usageAggregationTimeZoneStr = ObjectUtils.defaultIfNull(configs.get("usage.aggregation.timezone"), "GMT"); + usageAggregationTimeZone = TimeZone.getTimeZone(usageAggregationTimeZoneStr); return true; } + public static TimeZone getUsageAggregationTimeZone() { + return usageAggregationTimeZone; + } + @Override public boolean start() { if (logger.isInfoEnabled()) { @@ -158,8 +149,9 @@ protected void processQuotaBalanceForAccount(AccountVO accountVo, List<QuotaBalanceVO> creditsReceived = _quotaBalanceDao.findCreditBalance(accountId, domainId, startDate, endDate); - logger.debug(String.format("Account [%s] has [%s] credit 
entries before [%s].", accountToString, creditsReceived.size(), endDate)); + logger.debug("Account [{}] has [{}] credit entries before [{}].", accountToString, creditsReceived.size(), + DateUtil.displayDateInTimezone(usageAggregationTimeZone, endDate)); BigDecimal aggregatedUsage = BigDecimal.ZERO; - logger.debug(String.format("Aggregating the account [%s] credit entries before [%s].", accountToString, endDate)); + logger.debug("Aggregating the account [{}] credit entries before [{}].", accountToString, + DateUtil.displayDateInTimezone(usageAggregationTimeZone, endDate)); for (QuotaBalanceVO credit : creditsReceived) { aggregatedUsage = aggregatedUsage.add(credit.getCreditBalance()); } - logger.debug(String.format("The aggregation of the account [%s] credit entries before [%s] resulted in the value [%s].", accountToString, endDate, aggregatedUsage)); + logger.debug("The aggregation of the account [{}] credit entries before [{}] resulted in the value [{}].", + accountToString, DateUtil.displayDateInTimezone(usageAggregationTimeZone, endDate), aggregatedUsage); return aggregatedUsage; } @@ -306,7 +301,7 @@ protected List<UsageVO> getPendingUsageRecordsForQuotaAggregation(AccountVO acco protected List<QuotaUsageVO> createQuotaUsagesAccordingToQuotaTariffs(AccountVO account, List<UsageVO> usageRecords, Map<Integer, Pair<List<QuotaTariffVO>, Boolean>> mapQuotaTariffsPerUsageType) { String accountToString = account.reflectionToString(); - logger.info(String.format("Calculating quota usage of [%s] usage records for account [%s].", usageRecords.size(), accountToString)); + logger.info("Calculating quota usage of [{}] usage records for account [{}].", usageRecords.size(), accountToString); List<Pair<UsageVO, QuotaUsageVO>> pairsUsageAndQuotaUsage = new ArrayList<>(); @@ -314,7 +309,7 @@ for (UsageVO usageRecord : usageRecords) { int usageType = usageRecord.getUsageType(); - if (Boolean.FALSE.equals(shouldCalculateUsageRecord(account,usageRecord))) { + if (!shouldCalculateUsageRecord(account, usageRecord)) { pairsUsageAndQuotaUsage.add(new Pair<>(usageRecord, null)); continue; } @@ -339,8 +334,8 @@ protected boolean shouldCalculateUsageRecord(AccountVO accountVO, UsageVO usageRecord) { if (Boolean.FALSE.equals(QuotaConfig.QuotaAccountEnabled.valueIn(accountVO.getAccountId()))) { - logger.debug(String.format("Considering usage record [%s] as calculated and skipping it because account [%s] has the quota plugin disabled.", - usageRecord, accountVO.reflectionToString())); + logger.debug("Considering usage record [{}] as calculated and skipping it because account [{}] has the quota plugin disabled.", + usageRecord.toString(usageAggregationTimeZone), accountVO.reflectionToString()); return false; } return true; @@ -366,9 +361,8 @@ protected List<QuotaUsageVO> persistUsagesAndQuotaUsagesAndRetrievePersistedQuot protected BigDecimal aggregateQuotaTariffsValues(UsageVO usageRecord, List<QuotaTariffVO> quotaTariffs, boolean hasAnyQuotaTariffWithActivationRule, JsInterpreter jsInterpreter, String accountToString) { - String usageRecordToString = usageRecord.toString(); - logger.debug(String.format("Validating usage record [%s] for account [%s] against [%s] quota tariffs.", usageRecordToString, accountToString, - quotaTariffs.size())); + String usageRecordToString = usageRecord.toString(usageAggregationTimeZone); + logger.debug("Validating usage record [{}] for account [{}] against [{}] quota tariffs.", usageRecordToString, accountToString, quotaTariffs.size()); PresetVariables presetVariables = 
getPresetVariables(hasAnyQuotaTariffWithActivationRule, usageRecord); BigDecimal aggregatedQuotaTariffsValue = BigDecimal.ZERO; @@ -406,7 +400,7 @@ protected PresetVariables getPresetVariables(boolean hasAnyQuotaTariffWithActiva protected BigDecimal getQuotaTariffValueToBeApplied(QuotaTariffVO quotaTariff, JsInterpreter jsInterpreter, PresetVariables presetVariables) { String activationRule = quotaTariff.getActivationRule(); BigDecimal quotaTariffValue = quotaTariff.getCurrencyValue(); - String quotaTariffToString = quotaTariff.toString(); + String quotaTariffToString = quotaTariff.toString(usageAggregationTimeZone); if (StringUtils.isEmpty(activationRule)) { logger.debug(String.format("Quota tariff [%s] does not have an activation rule, therefore we will use the quota tariff value [%s] in the calculation.", @@ -468,10 +462,11 @@ protected boolean isQuotaTariffInPeriodToBeApplied(UsageVO usageRecord, QuotaTar Date quotaTariffEndDate = quotaTariff.getEndDate(); if ((quotaTariffEndDate != null && usageRecordStartDate.after(quotaTariffEndDate)) || usageRecordEndDate.before(quotaTariffStartDate)) { - logger.debug(String.format("Not applying quota tariff [%s] in usage record [%s] of account [%s] due to it is out of the period to be applied. Period of the usage" - + " record [startDate: %s, endDate: %s], period of the quota tariff [startDate: %s, endDate: %s].", quotaTariff, usageRecord.toString(), accountToString, - DateUtil.getOutputString(usageRecordStartDate), DateUtil.getOutputString(usageRecordEndDate), DateUtil.getOutputString(quotaTariffStartDate), - DateUtil.getOutputString(quotaTariffEndDate))); + logger.debug("Not applying quota tariff [{}] in usage record [{}] of account [{}] because it is out of the period to be applied. Period of the usage" + + " record [startDate: {}, endDate: {}], period of the quota tariff [startDate: {}, endDate: {}].", quotaTariff.toString(usageAggregationTimeZone), + usageRecord.toString(usageAggregationTimeZone), accountToString, DateUtil.displayDateInTimezone(usageAggregationTimeZone, usageRecordStartDate), + DateUtil.displayDateInTimezone(usageAggregationTimeZone, usageRecordEndDate), DateUtil.displayDateInTimezone(usageAggregationTimeZone, quotaTariffStartDate), + DateUtil.displayDateInTimezone(usageAggregationTimeZone, quotaTariffEndDate)); return false; } @@ -497,11 +492,11 @@ protected Map<Integer, Pair<List<QuotaTariffVO>, Boolean>> createMapQuotaTariffs } protected QuotaUsageVO createQuotaUsageAccordingToUsageUnit(UsageVO usageRecord, BigDecimal aggregatedQuotaTariffsValue, String accountToString) { - String usageRecordToString = usageRecord.toString(); + String usageRecordToString = usageRecord.toString(usageAggregationTimeZone); if (aggregatedQuotaTariffsValue.equals(BigDecimal.ZERO)) { - logger.debug(String.format("Usage record [%s] for account [%s] does not have quota tariffs to be calculated, therefore we will mark it as calculated.", - usageRecordToString, accountToString)); + logger.debug("No tariffs were applied to usage record [{}] of account [{}] or they resulted in 0; we will only mark the usage record as calculated.", + usageRecordToString, accountToString); return null; } diff --git a/framework/quota/src/main/java/org/apache/cloudstack/quota/vo/QuotaTariffVO.java b/framework/quota/src/main/java/org/apache/cloudstack/quota/vo/QuotaTariffVO.java index 7bc2870e36a1..5b07cadb94ae 100644 --- a/framework/quota/src/main/java/org/apache/cloudstack/quota/vo/QuotaTariffVO.java +++ b/framework/quota/src/main/java/org/apache/cloudstack/quota/vo/QuotaTariffVO.java @@ -16,11 +16,13 @@ 
//under the License. package org.apache.cloudstack.quota.vo; +import com.cloud.utils.DateUtil; import org.apache.cloudstack.api.InternalIdentity; import org.apache.cloudstack.quota.constant.QuotaTypes; import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; import com.cloud.utils.db.GenericDao; +import org.apache.commons.lang3.StringUtils; import javax.persistence.Column; import javax.persistence.Entity; @@ -33,6 +35,7 @@ import java.math.BigDecimal; import java.util.Date; +import java.util.TimeZone; import java.util.UUID; @Entity @@ -261,6 +264,12 @@ public boolean setUsageTypeData(int usageType) { @Override public String toString() { - return ReflectionToStringBuilderUtils.reflectOnlySelectedFields(this, "uuid", "name", "effectiveOn", "endDate"); - }; + return ReflectionToStringBuilderUtils.reflectOnlySelectedFields(this, "uuid", "name", "usageName"); + } + + public String toString(TimeZone timeZone) { + String startDateString = DateUtil.displayDateInTimezone(timeZone, getEffectiveOn()); + String endDateString = DateUtil.displayDateInTimezone(timeZone, getEndDate()); + return String.format("%s,\"startDate\":\"%s\",\"endDate\":\"%s\"}", StringUtils.chop(this.toString()), startDateString, endDateString); + } } diff --git a/framework/rest/pom.xml b/framework/rest/pom.xml index f1cbf3f38451..d1ffff3c7bdd 100644 --- a/framework/rest/pom.xml +++ b/framework/rest/pom.xml @@ -68,7 +68,7 @@ com.sun.xml.bind jaxb-impl - ${cs.jaxb.version} + ${cs.jaxb.impl.version} org.apache.cxf diff --git a/packaging/centos8/cloud.spec b/packaging/centos8/cloud.spec index 086d2207d097..37fe007e3fb2 100644 --- a/packaging/centos8/cloud.spec +++ b/packaging/centos8/cloud.spec @@ -52,7 +52,7 @@ intelligent IaaS cloud implementation. %package management Summary: CloudStack management server UI -Requires: java-11-openjdk +Requires: java-17-openjdk Requires: (tzdata-java or timezone-java) Requires: python3 Requires: bash @@ -98,7 +98,7 @@ The Apache CloudStack files shared between agent and management server %package agent Summary: CloudStack Agent for KVM hypervisors Requires: (openssh-clients or openssh) -Requires: java-11-openjdk +Requires: java-17-openjdk Requires: tzdata-java Requires: %{name}-common = %{_ver} Requires: libvirt @@ -135,7 +135,7 @@ The CloudStack baremetal agent %package usage Summary: CloudStack Usage calculation server -Requires: java-11-openjdk +Requires: java-17-openjdk Requires: tzdata-java Group: System Environment/Libraries %description usage @@ -556,8 +556,8 @@ if [ -f "/usr/share/cloudstack-common/scripts/installer/cloudstack-help-text" ]; fi %post marvin -pip install --upgrade https://files.pythonhosted.org/packages/08/1f/42d74bae9dd6dcfec67c9ed0f3fa482b1ae5ac5f117ca82ab589ecb3ca19/mysql_connector_python-8.0.31-py2.py3-none-any.whl -pip install --upgrade /usr/share/cloudstack-marvin/Marvin-*.tar.gz +pip3 install --upgrade https://files.pythonhosted.org/packages/08/1f/42d74bae9dd6dcfec67c9ed0f3fa482b1ae5ac5f117ca82ab589ecb3ca19/mysql_connector_python-8.0.31-py2.py3-none-any.whl +pip3 install --upgrade /usr/share/cloudstack-marvin/Marvin-*.tar.gz #No default permission as the permission setup is complex %files management diff --git a/packaging/systemd/cloudstack-management.default b/packaging/systemd/cloudstack-management.default index 252fb4b78f6c..ca8ff628fc19 100644 --- a/packaging/systemd/cloudstack-management.default +++ b/packaging/systemd/cloudstack-management.default @@ -15,7 +15,7 @@ # specific language governing permissions and limitations 
# under the License. -JAVA_OPTS="-Djava.security.properties=/etc/cloudstack/management/java.security.ciphers -Djava.awt.headless=true -Dcom.sun.management.jmxremote=false -Xmx2G -XX:+UseParallelGC -XX:MaxGCPauseMillis=500 -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=/var/log/cloudstack/management/ -XX:ErrorFile=/var/log/cloudstack/management/cloudstack-management.err " +JAVA_OPTS="-Djava.security.properties=/etc/cloudstack/management/java.security.ciphers -Djava.awt.headless=true -Dcom.sun.management.jmxremote=false -Xmx2G -XX:+UseParallelGC -XX:MaxGCPauseMillis=500 -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=/var/log/cloudstack/management/ -XX:ErrorFile=/var/log/cloudstack/management/cloudstack-management.err --add-opens=java.base/java.lang=ALL-UNNAMED --add-exports=java.base/sun.security.x509=ALL-UNNAMED" CLASSPATH="/usr/share/cloudstack-management/lib/*:/etc/cloudstack/management:/usr/share/cloudstack-common:/usr/share/cloudstack-management/setup:/usr/share/cloudstack-management:/usr/share/java/mysql-connector-java.jar:/usr/share/cloudstack-mysql-ha/lib/*" @@ -24,7 +24,7 @@ BOOTSTRAP_CLASS=org.apache.cloudstack.ServerDaemon ################################################################################################ #You can uncomment one of these options if you want to enable Java remote debugging. # #You can change the parameters at your will. The 'address' field defines the port to be used. # -################################################################################################ +################################################################################################ # This option here should be used with 'systemd' based operating systems such as CentOS7, Ubuntu 16, and so on. #JAVA_DEBUG="-agentlib:jdwp=transport=dt_socket,address=*:8000,server=y,suspend=n" diff --git a/packaging/systemd/cloudstack-usage.default b/packaging/systemd/cloudstack-usage.default index 089f3eec09cd..493f40c277a2 100644 --- a/packaging/systemd/cloudstack-usage.default +++ b/packaging/systemd/cloudstack-usage.default @@ -15,7 +15,7 @@ # specific language governing permissions and limitations # under the License. 
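# Illustrative note (an assumption, not text from the packaged file): '--add-opens' relaxes the strong encapsulation enforced by newer JDKs so that reflective libraries used by the usage server can access java.lang internals; additional packages could be opened the same way, e.g.: JAVA_OPTS="$JAVA_OPTS --add-opens=java.base/sun.nio.ch=ALL-UNNAMED"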
-JAVA_OPTS="-Xms256m -Xmx2048m" +JAVA_OPTS="-Xms256m -Xmx2048m --add-opens=java.base/java.lang=ALL-UNNAMED" CLASSPATH="/usr/share/cloudstack-usage/*:/usr/share/cloudstack-usage/lib/*:/usr/share/cloudstack-mysql-ha/lib/*:/etc/cloudstack/usage:/usr/share/java/mysql-connector-java.jar" diff --git a/plugins/database/quota/src/main/java/org/apache/cloudstack/api/response/QuotaResponseBuilderImpl.java b/plugins/database/quota/src/main/java/org/apache/cloudstack/api/response/QuotaResponseBuilderImpl.java index eb667b00c3bc..d7f2832831ab 100644 --- a/plugins/database/quota/src/main/java/org/apache/cloudstack/api/response/QuotaResponseBuilderImpl.java +++ b/plugins/database/quota/src/main/java/org/apache/cloudstack/api/response/QuotaResponseBuilderImpl.java @@ -34,6 +34,7 @@ import javax.inject.Inject; +import com.cloud.utils.DateUtil; import org.apache.cloudstack.api.ApiErrorCode; import org.apache.cloudstack.api.ServerApiException; import org.apache.cloudstack.api.command.QuotaBalanceCmd; @@ -44,6 +45,7 @@ import org.apache.cloudstack.api.command.QuotaTariffListCmd; import org.apache.cloudstack.api.command.QuotaTariffUpdateCmd; import org.apache.cloudstack.quota.QuotaManager; +import org.apache.cloudstack.quota.QuotaManagerImpl; import org.apache.cloudstack.quota.QuotaService; import org.apache.cloudstack.quota.QuotaStatement; import org.apache.cloudstack.quota.constant.QuotaConfig; @@ -470,12 +472,14 @@ protected void validateEndDateOnCreatingNewQuotaTariff(QuotaTariffVO newQuotaTar } if (endDate.compareTo(startDate) < 0) { - throw new InvalidParameterValueException(String.format("The quota tariff's end date [%s] cannot be less than the start date [%s]", endDate, startDate)); + throw new InvalidParameterValueException(String.format("The quota tariff's end date [%s] cannot be less than the start date [%s].", + endDate, startDate)); } Date now = _quotaService.computeAdjustedTime(new Date()); if (endDate.compareTo(now) < 0) { - throw new InvalidParameterValueException(String.format("The quota tariff's end date [%s] cannot be less than now [%s].", endDate, now)); + throw new InvalidParameterValueException(String.format("The quota tariff's end date [%s] cannot be less than now [%s].", + endDate, now)); } newQuotaTariff.setEndDate(endDate); @@ -487,7 +491,8 @@ public QuotaCreditsResponse addQuotaCredits(Long accountId, Long domainId, Doubl QuotaBalanceVO qb = _quotaBalanceDao.findLaterBalanceEntry(accountId, domainId, despositedOn); if (qb != null) { - throw new InvalidParameterValueException("Incorrect deposit date: " + despositedOn + " there are balance entries after this date"); + throw new InvalidParameterValueException(String.format("Incorrect deposit date [%s], as there are balance entries after this date.", + despositedOn)); } QuotaCreditsVO credits = new QuotaCreditsVO(accountId, domainId, new BigDecimal(amount), updatedBy); @@ -500,9 +505,8 @@ public QuotaCreditsResponse addQuotaCredits(Long accountId, Long domainId, Doubl } final boolean lockAccountEnforcement = "true".equalsIgnoreCase(QuotaConfig.QuotaEnableEnforcement.value()); final BigDecimal currentAccountBalance = _quotaBalanceDao.lastQuotaBalance(accountId, domainId, startOfNextDay(new Date(despositedOn.getTime()))); - if (logger.isDebugEnabled()) { - logger.debug("AddQuotaCredits: Depositing " + amount + " on adjusted date " + despositedOn + ", current balance " + currentAccountBalance); - } + logger.debug("Depositing [{}] credits on adjusted date [{}]; current balance is [{}].", amount, + 
DateUtil.displayDateInTimezone(QuotaManagerImpl.getUsageAggregationTimeZone(), despositedOn), currentAccountBalance); // update quota account with the balance _quotaService.saveQuotaAccount(account, currentAccountBalance, despositedOn); if (lockAccountEnforcement) { @@ -581,9 +585,10 @@ public QuotaBalanceResponse createQuotaLastBalanceResponse(List<QuotaBalanceVO> QuotaBalanceResponse resp = new QuotaBalanceResponse(); BigDecimal lastCredits = new BigDecimal(0); for (QuotaBalanceVO entry : quotaBalance) { - if (logger.isDebugEnabled()) { - logger.debug("createQuotaLastBalanceResponse Date=" + entry.getUpdatedOn() + " balance=" + entry.getCreditBalance() + " credit=" + entry.getCreditsId()); - } + logger.debug("createQuotaLastBalanceResponse Date={} balance={} credit={}", + DateUtil.displayDateInTimezone(QuotaManagerImpl.getUsageAggregationTimeZone(), entry.getUpdatedOn()), + entry.getCreditBalance(), entry.getCreditsId()); + lastCredits = lastCredits.add(entry.getCreditBalance()); } resp.setStartQuota(lastCredits); @@ -638,7 +643,8 @@ public QuotaTariffVO createQuotaTariff(QuotaTariffCreateCmd cmd) { } if (startDate.compareTo(now) < 0) { - throw new InvalidParameterValueException(String.format("The quota tariff's start date [%s] cannot be less than now [%s]", startDate, now)); + throw new InvalidParameterValueException(String.format("The value passed as the quota tariff's start date is in the past: [%s]. " + + "Please provide a date in the future, or omit the parameter to use the current date and time.", startDate)); } return persistNewQuotaTariff(null, name, usageType, startDate, cmd.getEntityOwnerId(), endDate, value, description, activationRule); diff --git a/plugins/database/quota/src/main/java/org/apache/cloudstack/quota/QuotaServiceImpl.java b/plugins/database/quota/src/main/java/org/apache/cloudstack/quota/QuotaServiceImpl.java index bd08f6afd7ef..88a69c47e055 100644 --- a/plugins/database/quota/src/main/java/org/apache/cloudstack/quota/QuotaServiceImpl.java +++ b/plugins/database/quota/src/main/java/org/apache/cloudstack/quota/QuotaServiceImpl.java @@ -51,7 +51,7 @@ import org.apache.cloudstack.quota.vo.QuotaAccountVO; import org.apache.cloudstack.quota.vo.QuotaBalanceVO; import org.apache.cloudstack.quota.vo.QuotaUsageVO; -import org.apache.cloudstack.utils.usage.UsageUtils; +import org.apache.commons.lang3.ObjectUtils; import org.springframework.stereotype.Component; import com.cloud.configuration.Config; @@ -83,7 +83,6 @@ public class QuotaServiceImpl extends ManagerBase implements QuotaService, Confi private QuotaResponseBuilder _respBldr; private TimeZone _usageTimezone; - private int _aggregationDuration = 0; public QuotaServiceImpl() { super(); @@ -92,21 +91,10 @@ public boolean configure(String name, Map<String, Object> params) throws ConfigurationException { super.configure(name, params); - String timeZoneStr = _configDao.getValue(Config.UsageAggregationTimezone.toString()); - String aggregationRange = _configDao.getValue(Config.UsageStatsJobAggregationRange.toString()); - if (timeZoneStr == null) { - timeZoneStr = "GMT"; - } + + String timeZoneStr = ObjectUtils.defaultIfNull(_configDao.getValue(Config.UsageAggregationTimezone.toString()), "GMT"); _usageTimezone = TimeZone.getTimeZone(timeZoneStr); - _aggregationDuration = Integer.parseInt(aggregationRange); - if (_aggregationDuration < UsageUtils.USAGE_AGGREGATION_RANGE_MIN) { - logger.warn("Usage stats job aggregation range is to small, using the minimum value of " + UsageUtils.USAGE_AGGREGATION_RANGE_MIN); - 
_aggregationDuration = UsageUtils.USAGE_AGGREGATION_RANGE_MIN; - } - if (logger.isDebugEnabled()) { - logger.debug("Usage timezone = " + _usageTimezone + " AggregationDuration=" + _aggregationDuration); - } return true; } diff --git a/plugins/database/quota/src/test/java/org/apache/cloudstack/quota/QuotaServiceImplTest.java b/plugins/database/quota/src/test/java/org/apache/cloudstack/quota/QuotaServiceImplTest.java index 0b9d34daa709..fa58c35ea5d5 100644 --- a/plugins/database/quota/src/test/java/org/apache/cloudstack/quota/QuotaServiceImplTest.java +++ b/plugins/database/quota/src/test/java/org/apache/cloudstack/quota/QuotaServiceImplTest.java @@ -99,7 +99,6 @@ public void setup() throws IllegalAccessException, NoSuchFieldException, Configu QuotaResponseBuilderField.set(quotaService, respBldr); Mockito.when(configDao.getValue(Mockito.eq(Config.UsageAggregationTimezone.toString()))).thenReturn("IST"); - Mockito.when(configDao.getValue(Mockito.eq(Config.UsageStatsJobAggregationRange.toString()))).thenReturn("1"); quotaService.configure("randomName", null); } diff --git a/plugins/hypervisors/baremetal/pom.xml b/plugins/hypervisors/baremetal/pom.xml index ecbde4b7550a..d866c9b47e2f 100755 --- a/plugins/hypervisors/baremetal/pom.xml +++ b/plugins/hypervisors/baremetal/pom.xml @@ -45,7 +45,7 @@ com.sun.xml.bind jaxb-impl - ${cs.jaxb.version} + ${cs.jaxb.impl.version} diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtCheckAndRepairVolumeCommandWrapper.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtCheckAndRepairVolumeCommandWrapper.java new file mode 100644 index 000000000000..cd81a2fbc232 --- /dev/null +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtCheckAndRepairVolumeCommandWrapper.java @@ -0,0 +1,192 @@ +// +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+// + +package com.cloud.hypervisor.kvm.resource.wrapper; + +import com.cloud.agent.api.Answer; +import com.cloud.agent.api.storage.CheckAndRepairVolumeCommand; +import com.cloud.agent.api.storage.CheckAndRepairVolumeAnswer; +import com.cloud.agent.api.to.StorageFilerTO; +import com.cloud.hypervisor.kvm.resource.LibvirtComputingResource; +import com.cloud.hypervisor.kvm.storage.KVMPhysicalDisk; +import com.cloud.hypervisor.kvm.storage.KVMStoragePool; +import com.cloud.hypervisor.kvm.storage.KVMStoragePoolManager; +import com.cloud.resource.CommandWrapper; +import com.cloud.resource.ResourceWrapper; +import com.cloud.utils.exception.CloudRuntimeException; +import com.fasterxml.jackson.core.JsonProcessingException; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.fasterxml.jackson.databind.JsonNode; +import org.apache.cloudstack.utils.cryptsetup.KeyFile; +import org.apache.cloudstack.utils.qemu.QemuImageOptions; +import org.apache.cloudstack.utils.qemu.QemuImg; +import org.apache.cloudstack.utils.qemu.QemuImgException; +import org.apache.cloudstack.utils.qemu.QemuImgFile; +import org.apache.cloudstack.utils.qemu.QemuObject; +import org.apache.cloudstack.utils.qemu.QemuObject.EncryptFormat; +import org.apache.commons.lang.ArrayUtils; +import org.apache.commons.lang3.StringUtils; +import org.apache.log4j.Logger; +import org.libvirt.LibvirtException; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; + +@ResourceWrapper(handles = CheckAndRepairVolumeCommand.class) +public class LibvirtCheckAndRepairVolumeCommandWrapper extends CommandWrapper<CheckAndRepairVolumeCommand, Answer, LibvirtComputingResource> { + + private static final Logger s_logger = Logger.getLogger(LibvirtCheckAndRepairVolumeCommandWrapper.class); + + @Override + public Answer execute(CheckAndRepairVolumeCommand command, LibvirtComputingResource serverResource) { + final String volumeId = command.getPath(); + final String repair = command.getRepair(); + final StorageFilerTO spool = command.getPool(); + + final KVMStoragePoolManager storagePoolMgr = serverResource.getStoragePoolMgr(); + KVMStoragePool pool = storagePoolMgr.getStoragePool(spool.getType(), spool.getUuid()); + final KVMPhysicalDisk vol = pool.getPhysicalDisk(volumeId); + byte[] passphrase = command.getPassphrase(); + + try { + CheckAndRepairVolumeAnswer answer = null; + String checkVolumeResult = null; + if (QemuImg.PhysicalDiskFormat.RAW.equals(vol.getFormat())) { + checkVolumeResult = "Volume format RAW is not supported for check and repair"; + String jsonStringFormat = String.format("{ \"message\": \"%s\" }", checkVolumeResult); + answer = new CheckAndRepairVolumeAnswer(command, true, checkVolumeResult); + answer.setVolumeCheckExecutionResult(jsonStringFormat); + + return answer; + } else { + answer = checkVolume(vol, command, serverResource); + checkVolumeResult = answer.getVolumeCheckExecutionResult(); + } + + CheckAndRepairVolumeAnswer resultAnswer = checkIfRepairLeaksIsRequired(command, checkVolumeResult, vol.getName()); + // resultAnswer is not null when repair is not required, so return from here + if (resultAnswer != null) { + return resultAnswer; + } + + if (StringUtils.isNotEmpty(repair)) { + answer = repairVolume(vol, command, serverResource, checkVolumeResult); + } + + return answer; + } catch (Exception e) { + return new CheckAndRepairVolumeAnswer(command, false, e.toString()); + } finally { + if (passphrase != null) { + Arrays.fill(passphrase, (byte) 0); + } + } + } + + private CheckAndRepairVolumeAnswer checkVolume(KVMPhysicalDisk vol, 
CheckAndRepairVolumeCommand command, LibvirtComputingResource serverResource) { + EncryptFormat encryptFormat = EncryptFormat.enumValue(command.getEncryptFormat()); + byte[] passphrase = command.getPassphrase(); + String checkVolumeResult = checkAndRepairVolume(vol, null, encryptFormat, passphrase, serverResource); + s_logger.info(String.format("Check volume result for the volume [%s]: %s", vol.getName(), checkVolumeResult)); + CheckAndRepairVolumeAnswer answer = new CheckAndRepairVolumeAnswer(command, true, checkVolumeResult); + answer.setVolumeCheckExecutionResult(checkVolumeResult); + + return answer; + } + + private CheckAndRepairVolumeAnswer repairVolume(KVMPhysicalDisk vol, CheckAndRepairVolumeCommand command, LibvirtComputingResource serverResource, String checkVolumeResult) { + EncryptFormat encryptFormat = EncryptFormat.enumValue(command.getEncryptFormat()); + byte[] passphrase = command.getPassphrase(); + final String repair = command.getRepair(); + + String repairVolumeResult = checkAndRepairVolume(vol, repair, encryptFormat, passphrase, serverResource); + String finalResult = (checkVolumeResult != null ? checkVolumeResult.concat(",") : "") + repairVolumeResult; + s_logger.info(String.format("Repair volume result for the volume [%s]: %s", vol.getName(), repairVolumeResult)); + + CheckAndRepairVolumeAnswer answer = new CheckAndRepairVolumeAnswer(command, true, finalResult); + answer.setVolumeRepairExecutionResult(repairVolumeResult); + answer.setVolumeCheckExecutionResult(checkVolumeResult); + + return answer; + } + + private CheckAndRepairVolumeAnswer checkIfRepairLeaksIsRequired(CheckAndRepairVolumeCommand command, String checkVolumeResult, String volumeName) { + final String repair = command.getRepair(); + int leaks = 0; + if (StringUtils.isNotEmpty(checkVolumeResult) && StringUtils.isNotEmpty(repair) && repair.equals("leaks")) { + ObjectMapper objectMapper = new ObjectMapper(); + JsonNode jsonNode = null; + try { + jsonNode = objectMapper.readTree(checkVolumeResult); + } catch (JsonProcessingException e) { + String msg = String.format("Error processing response [%s] during volume check: %s", checkVolumeResult, e.getMessage()); + // skipRepairVolumeCommand already logs the message, so it is not logged here as well + return skipRepairVolumeCommand(command, checkVolumeResult, msg); + } + JsonNode leaksNode = jsonNode.get("leaks"); + if (leaksNode != null) { + leaks = leaksNode.asInt(); + } + + if (leaks == 0) { + String msg = String.format("No leaks found while checking the volume %s, so skipping repair", volumeName); + return skipRepairVolumeCommand(command, checkVolumeResult, msg); + } + } + + return null; + } + + private CheckAndRepairVolumeAnswer skipRepairVolumeCommand(CheckAndRepairVolumeCommand command, String checkVolumeResult, String msg) { + s_logger.info(msg); + String jsonStringFormat = String.format("{ \"message\": \"%s\" }", msg); + String finalResult = (checkVolumeResult != null ? 
checkVolumeResult.concat(",") : "") + jsonStringFormat; + CheckAndRepairVolumeAnswer answer = new CheckAndRepairVolumeAnswer(command, true, finalResult); + answer.setVolumeRepairExecutionResult(jsonStringFormat); + answer.setVolumeCheckExecutionResult(checkVolumeResult); + + return answer; + } + + protected String checkAndRepairVolume(final KVMPhysicalDisk vol, final String repair, final EncryptFormat encryptFormat, byte[] passphrase, final LibvirtComputingResource libvirtComputingResource) throws CloudRuntimeException { + List passphraseObjects = new ArrayList<>(); + QemuImageOptions imgOptions = null; + if (ArrayUtils.isEmpty(passphrase)) { + passphrase = null; + } + try (KeyFile keyFile = new KeyFile(passphrase)) { + if (passphrase != null) { + passphraseObjects.add( + QemuObject.prepareSecretForQemuImg(vol.getFormat(), encryptFormat, keyFile.toString(), "sec0", null) + ); + imgOptions = new QemuImageOptions(vol.getFormat(), vol.getPath(),"sec0"); + } + QemuImg q = new QemuImg(libvirtComputingResource.getCmdsTimeout()); + QemuImgFile file = new QemuImgFile(vol.getPath()); + return q.checkAndRepair(file, imgOptions, passphraseObjects, repair); + } catch (QemuImgException | LibvirtException ex) { + throw new CloudRuntimeException("Failed to run qemu-img for check volume", ex); + } catch (IOException ex) { + throw new CloudRuntimeException("Failed to create keyfile for encrypted volume for check volume operation", ex); + } + } +} diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtPatchSystemVmCommandWrapper.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtPatchSystemVmCommandWrapper.java index a9e4d0da4320..2c8918e620dd 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtPatchSystemVmCommandWrapper.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtPatchSystemVmCommandWrapper.java @@ -77,6 +77,7 @@ public Answer execute(PatchSystemVmCommand cmd, LibvirtComputingResource serverR if (patchResult.first()) { String scriptVersion = lines[1]; if (StringUtils.isNotEmpty(patchResult.second())) { + logger.debug("Patch result of systemVM {}: {}", sysVMName, patchResult.second()); String res = patchResult.second().replace("\n", " "); String[] output = res.split(":"); if (output.length != 2) { diff --git a/plugins/hypervisors/kvm/src/main/java/org/apache/cloudstack/utils/qemu/QemuImg.java b/plugins/hypervisors/kvm/src/main/java/org/apache/cloudstack/utils/qemu/QemuImg.java index d0736019469f..1ddc16c89453 100644 --- a/plugins/hypervisors/kvm/src/main/java/org/apache/cloudstack/utils/qemu/QemuImg.java +++ b/plugins/hypervisors/kvm/src/main/java/org/apache/cloudstack/utils/qemu/QemuImg.java @@ -813,4 +813,45 @@ protected static boolean helpSupportsImageFormat(String text, QemuImg.PhysicalDi Pattern pattern = Pattern.compile("Supported\\sformats:[a-zA-Z0-9-_\\s]*?\\b" + format + "\\b", CASE_INSENSITIVE); return pattern.matcher(text).find(); } + + /** + * check for any leaks for an image and repair. + * + * @param imageOptions + * Qemu style image options to be used in the checking process. + * @param qemuObjects + * Qemu style options (e.g. for passing secrets). 
+ * @param repair + * repair mode passed to qemu-img's {@code -r} flag (e.g. {@code leaks} or {@code all}); when empty, no repair is attempted + * @return the JSON report produced by qemu-img, or null when the command yields no output + */ + public String checkAndRepair(final QemuImgFile file, final QemuImageOptions imageOptions, final List<QemuObject> qemuObjects, final String repair) throws QemuImgException { + final Script script = new Script(_qemuImgPath); + script.add("check"); + if (imageOptions == null) { + script.add(file.getFileName()); + } + + // qemuObjects may be null when no secrets are needed (see QemuImgTest#testCheckAndRepair), so guard before iterating + if (qemuObjects != null) { + for (QemuObject o : qemuObjects) { + script.add(o.toCommandFlag()); + } + } + + if (imageOptions != null) { + script.add(imageOptions.toCommandFlag()); + } + + if (StringUtils.isNotEmpty(repair)) { + script.add("-r"); + script.add(repair); + } + + script.add("--output=json"); + script.add("2>/dev/null"); + + final String result = Script.runBashScriptIgnoreExitValue(script.toString(), 3); + if (result != null) { + logger.debug(String.format("Check volume execution result: %s", result)); + } + + return result; + } } diff --git a/plugins/hypervisors/kvm/src/test/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtCheckAndRepairVolumeCommandWrapperTest.java b/plugins/hypervisors/kvm/src/test/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtCheckAndRepairVolumeCommandWrapperTest.java new file mode 100644 index 000000000000..e2120e46d130 --- /dev/null +++ b/plugins/hypervisors/kvm/src/test/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtCheckAndRepairVolumeCommandWrapperTest.java @@ -0,0 +1,98 @@ +/* + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.cloud.hypervisor.kvm.resource.wrapper; + +import com.cloud.agent.api.storage.CheckAndRepairVolumeAnswer; +import com.cloud.agent.api.storage.CheckAndRepairVolumeCommand; +import com.cloud.agent.api.to.StorageFilerTO; +import com.cloud.hypervisor.kvm.resource.LibvirtComputingResource; +import com.cloud.hypervisor.kvm.storage.KVMPhysicalDisk; +import com.cloud.hypervisor.kvm.storage.KVMStoragePool; +import com.cloud.hypervisor.kvm.storage.KVMStoragePoolManager; +import com.cloud.storage.Storage; + +import org.apache.cloudstack.utils.qemu.QemuImg; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.Mock; +import org.mockito.MockedConstruction; +import org.mockito.Mockito; +import org.mockito.Spy; +import org.mockito.junit.MockitoJUnitRunner; + +import static org.mockito.Mockito.when; + +@RunWith(MockitoJUnitRunner.class) +public class LibvirtCheckAndRepairVolumeCommandWrapperTest { + + @Spy + LibvirtCheckAndRepairVolumeCommandWrapper libvirtCheckAndRepairVolumeCommandWrapperSpy = Mockito.spy(LibvirtCheckAndRepairVolumeCommandWrapper.class); + + @Mock + LibvirtComputingResource libvirtComputingResourceMock; + + @Mock + CheckAndRepairVolumeCommand checkAndRepairVolumeCommand; + + @Mock + QemuImg qemuImgMock; + + @Before + public void init() { + when(libvirtComputingResourceMock.getCmdsTimeout()).thenReturn(60); + } + + @Test + public void testCheckAndRepairVolume() throws Exception { + + CheckAndRepairVolumeCommand cmd = Mockito.mock(CheckAndRepairVolumeCommand.class); + when(cmd.getPath()).thenReturn("cbac516a-0f1f-4559-921c-1a7c6c408ccf"); + when(cmd.getRepair()).thenReturn(null); + StorageFilerTO spool = Mockito.mock(StorageFilerTO.class); + when(cmd.getPool()).thenReturn(spool); + + KVMStoragePoolManager storagePoolMgr = Mockito.mock(KVMStoragePoolManager.class); + when(libvirtComputingResourceMock.getStoragePoolMgr()).thenReturn(storagePoolMgr); + KVMStoragePool pool = Mockito.mock(KVMStoragePool.class); + when(spool.getType()).thenReturn(Storage.StoragePoolType.PowerFlex); + when(spool.getUuid()).thenReturn("b6be258b-42b8-49a4-ad51-3634ef8ff76a"); + when(storagePoolMgr.getStoragePool(Storage.StoragePoolType.PowerFlex, "b6be258b-42b8-49a4-ad51-3634ef8ff76a")).thenReturn(pool); + + KVMPhysicalDisk vol = Mockito.mock(KVMPhysicalDisk.class); + when(pool.getPhysicalDisk("cbac516a-0f1f-4559-921c-1a7c6c408ccf")).thenReturn(vol); + Mockito.when(vol.getFormat()).thenReturn(QemuImg.PhysicalDiskFormat.QCOW2); + + String checkResult = "{\n" + + " \"image-end-offset\": 6442582016,\n" + + " \"total-clusters\": 163840,\n" + + " \"check-errors\": 0,\n" + + " \"leaks\": 124,\n" + + " \"allocated-clusters\": 98154,\n" + + " \"filename\": \"/var/lib/libvirt/images/26be20c7-b9d0-43f6-a76e-16c70737a0e0\",\n" + + " \"format\": \"qcow2\",\n" + + " \"fragmented-clusters\": 96135\n" + + "}"; + + try (MockedConstruction ignored = Mockito.mockConstruction(QemuImg.class, (mock, context) -> { + when(mock.checkAndRepair(Mockito.any(), Mockito.any(), Mockito.any(), Mockito.any())).thenReturn(checkResult); + })) { + CheckAndRepairVolumeAnswer result = (CheckAndRepairVolumeAnswer) libvirtCheckAndRepairVolumeCommandWrapperSpy.execute(cmd, libvirtComputingResourceMock); + Assert.assertEquals(checkResult, result.getVolumeCheckExecutionResult()); + } + } + +} diff --git a/plugins/hypervisors/kvm/src/test/java/com/cloud/hypervisor/kvm/storage/LibvirtStoragePoolTest.java 
b/plugins/hypervisors/kvm/src/test/java/com/cloud/hypervisor/kvm/storage/LibvirtStoragePoolTest.java index b2c58fd9b96a..88d4daa2dabc 100644 --- a/plugins/hypervisors/kvm/src/test/java/com/cloud/hypervisor/kvm/storage/LibvirtStoragePoolTest.java +++ b/plugins/hypervisors/kvm/src/test/java/com/cloud/hypervisor/kvm/storage/LibvirtStoragePoolTest.java @@ -87,6 +87,9 @@ public void testExternalSnapshot() { StoragePool storage = Mockito.mock(StoragePool.class); LibvirtStoragePool nfsPool = new LibvirtStoragePool(uuid, name, StoragePoolType.NetworkFilesystem, adapter, storage); + if (nfsPool.getType() != StoragePoolType.NetworkFilesystem) { + System.out.println("tested"); + } assertFalse(nfsPool.isExternalSnapshot()); LibvirtStoragePool rbdPool = new LibvirtStoragePool(uuid, name, StoragePoolType.RBD, adapter, storage); diff --git a/plugins/hypervisors/kvm/src/test/java/org/apache/cloudstack/utils/qemu/QemuImgTest.java b/plugins/hypervisors/kvm/src/test/java/org/apache/cloudstack/utils/qemu/QemuImgTest.java index 8bb762cca852..b0981dde26e7 100644 --- a/plugins/hypervisors/kvm/src/test/java/org/apache/cloudstack/utils/qemu/QemuImgTest.java +++ b/plugins/hypervisors/kvm/src/test/java/org/apache/cloudstack/utils/qemu/QemuImgTest.java @@ -368,4 +368,21 @@ public void testHelpSupportsImageFormat() throws QemuImgException, LibvirtExcept Assert.assertTrue("should support qcow2", QemuImg.helpSupportsImageFormat(partialHelp, PhysicalDiskFormat.QCOW2)); Assert.assertFalse("should not support http", QemuImg.helpSupportsImageFormat(partialHelp, PhysicalDiskFormat.SHEEPDOG)); } + + @Test + public void testCheckAndRepair() throws LibvirtException { + String filename = "/tmp/" + UUID.randomUUID() + ".qcow2"; + + QemuImgFile file = new QemuImgFile(filename); + + try { + QemuImg qemu = new QemuImg(0); + qemu.checkAndRepair(file, null, null, null); + } catch (QemuImgException e) { + fail(e.getMessage()); + } + + File f = new File(filename); + f.delete(); + } } diff --git a/plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/vmware/resource/VmwareResource.java b/plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/vmware/resource/VmwareResource.java index bafe52b4d79d..10ad8f2176a1 100644 --- a/plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/vmware/resource/VmwareResource.java +++ b/plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/vmware/resource/VmwareResource.java @@ -4769,7 +4769,13 @@ protected Answer execute(MigrateCommand cmd) { final String vmName = cmd.getVmName(); try { VmwareHypervisorHost hyperHost = getHyperHost(getServiceContext()); + if (hyperHost == null) { + throw new CloudRuntimeException("no hypervisor host found for migrate command"); + } ManagedObjectReference morDc = hyperHost.getHyperHostDatacenter(); + if (morDc == null) { + throw new CloudRuntimeException("no Managed Object Reference for the Data Center found for migrate command"); + } // find VM through datacenter (VM is not at the target host yet) VirtualMachineMO vmMo = hyperHost.findVmOnPeerHyperHost(vmName); @@ -4780,6 +4786,9 @@ protected Answer execute(MigrateCommand cmd) { } VmwareHypervisorHost destHyperHost = getTargetHyperHost(new DatacenterMO(hyperHost.getContext(), morDc), cmd.getDestinationIp()); + if (destHyperHost == null) { + throw new CloudRuntimeException("no destination Hypervisor Host found for migrate command"); + } ManagedObjectReference morTargetPhysicalHost = destHyperHost.findMigrationTarget(vmMo); if (morTargetPhysicalHost == null) { @@ -4791,7 +4800,8 @@ protected 
Answer execute(MigrateCommand cmd) { } return new MigrateAnswer(cmd, true, "migration succeeded", null); - } catch (Throwable e) { + } catch (Exception e) { + logger.info(String.format("migrate command for %s failed due to %s", vmName, e.getLocalizedMessage())); return new MigrateAnswer(cmd, false, createLogMessageException(e, cmd), null); } } diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/version/KubernetesVersionManagerImpl.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/version/KubernetesVersionManagerImpl.java index 4cdc30a5850d..4718fc4bad99 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/version/KubernetesVersionManagerImpl.java +++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/version/KubernetesVersionManagerImpl.java @@ -102,7 +102,9 @@ private KubernetesSupportedVersionResponse createKubernetesSupportedVersionRespo if (template != null) { response.setIsoId(template.getUuid()); response.setIsoName(template.getName()); - response.setIsoState(template.getState().toString()); + if (template.getState() != null) { + response.setIsoState(template.getState().toString()); + } response.setDirectDownload(template.isDirectDownload()); } response.setCreated(kubernetesSupportedVersion.getCreated()); diff --git a/plugins/integrations/kubernetes-service/src/main/resources/conf/k8s-control-node-add.yml b/plugins/integrations/kubernetes-service/src/main/resources/conf/k8s-control-node-add.yml index b4167147ba48..2c18efa01891 100644 --- a/plugins/integrations/kubernetes-service/src/main/resources/conf/k8s-control-node-add.yml +++ b/plugins/integrations/kubernetes-service/src/main/resources/conf/k8s-control-node-add.yml @@ -221,6 +221,10 @@ write_files: fi kubeadm join {{ k8s_control_node.join_ip }}:6443 --token {{ k8s_control_node.cluster.token }} --control-plane --certificate-key {{ k8s_control_node.cluster.ha.certificate.key }} --discovery-token-unsafe-skip-ca-verification + mkdir -p /root/.kube + cp -i /etc/kubernetes/admin.conf /root/.kube/config + chown $(id -u):$(id -g) /root/.kube/config + sudo touch /home/cloud/success echo "true" > /home/cloud/success diff --git a/plugins/integrations/kubernetes-service/src/main/resources/script/deploy-cloudstack-secret b/plugins/integrations/kubernetes-service/src/main/resources/script/deploy-cloudstack-secret index 9ffd3ecc84ad..f4536feca36b 100755 --- a/plugins/integrations/kubernetes-service/src/main/resources/script/deploy-cloudstack-secret +++ b/plugins/integrations/kubernetes-service/src/main/resources/script/deploy-cloudstack-secret @@ -21,14 +21,14 @@ function usage() { Usage: ./deploy-cloudstack-secret [OPTIONS]... To deploy the keys needed for the cloudstack kubernetes provider. 
Arguments: - -u, --url string ID of the cluster + -u, --url string URL of the CloudStack API -k, --key string API Key -s, --secret string Secret Key -p, --project string Project ID Other arguments: -h, --help Display this help message and exit Examples: - ./deploy-cloudstack-secret -u http://localhost:8080 -k abcd -s efgh + ./deploy-cloudstack-secret -u http://10.10.10.10:8080/client/api -k abcd -s efgh USAGE exit 0 } diff --git a/plugins/network-elements/internal-loadbalancer/src/main/java/org/apache/cloudstack/network/lb/InternalLoadBalancerVMManager.java b/plugins/network-elements/internal-loadbalancer/src/main/java/org/apache/cloudstack/network/lb/InternalLoadBalancerVMManager.java index 339b0c1c3f38..8332c8be9355 100644 --- a/plugins/network-elements/internal-loadbalancer/src/main/java/org/apache/cloudstack/network/lb/InternalLoadBalancerVMManager.java +++ b/plugins/network-elements/internal-loadbalancer/src/main/java/org/apache/cloudstack/network/lb/InternalLoadBalancerVMManager.java @@ -32,7 +32,7 @@ public interface InternalLoadBalancerVMManager { //RAM/CPU for the system offering used by Internal LB VMs - public static final int DEFAULT_INTERNALLB_VM_RAMSIZE = 256; // 256 MB + public static final int DEFAULT_INTERNALLB_VM_RAMSIZE = 512; // 512 MB public static final int DEFAULT_INTERNALLB_VM_CPU_MHZ = 256; // 256 MHz /** diff --git a/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/driver/ScaleIOPrimaryDataStoreDriver.java b/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/driver/ScaleIOPrimaryDataStoreDriver.java index 22689909f0ea..8b521dcb2674 100644 --- a/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/driver/ScaleIOPrimaryDataStoreDriver.java +++ b/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/driver/ScaleIOPrimaryDataStoreDriver.java @@ -304,6 +304,11 @@ public String getConnectedSdc(long poolId, long hostId) { return null; } + @Override + public boolean requiresAccessForMigration(DataObject dataObject) { + return true; + } + @Override public long getUsedBytes(StoragePool storagePool) { long usedSpaceBytes = 0; diff --git a/pom.xml b/pom.xml index 3030302c24e7..a8062ae8fe65 100644 --- a/pom.xml +++ b/pom.xml @@ -50,7 +50,7 @@ UTF-8 UTF-8 https://download.cloudstack.org/systemvm - 4.19.0.0 + 4.20.0.0 apache https://sonarcloud.io @@ -151,7 +151,9 @@ 3.1.1 1.3.2 2.3.0 - 2.3.2-1 + 2.3.9 + 2.3.3 + 2.3.7 2.26 9.4.51.v20230217 9.4.27.v20200227 @@ -1040,6 +1042,7 @@ systemvm/agent/js/jquery.js systemvm/agent/js/jquery.flot.navigate.js systemvm/agent/noVNC/** + systemvm/agent/packages/** systemvm/debian/** test/integration/component/test_host_ha.sh test/systemvm/README.md diff --git a/scripts/installer/cloudstack-help-text b/scripts/installer/cloudstack-help-text index 1231464aa569..fbb2f0f0a7ee 100755 --- a/scripts/installer/cloudstack-help-text +++ b/scripts/installer/cloudstack-help-text @@ -40,4 +40,10 @@ printf " * Release notes: https://docs.cloudstack.apache.org/en/${ACL_MINO printf " * Join mailing lists: https://cloudstack.apache.org/mailing-lists.html\n" printf " * Take the survey: https://cloudstack.apache.org/survey.html\n" printf " * Report issues: https://github.com/apache/cloudstack/issues/new\n" + +if [ "$1" = "management" ];then + printf "\nSince Apache CloudStack 4.20.0.0, the System VMs and virtual routers require at least 512 MiB memory, please check the System Offerings." 
+ printf "\nMore information can be found at https://docs.cloudstack.apache.org/en/${ACL_MINOR_VERSION:-latest}/upgrading/upgrade/_sysvm_restart.html\n" +fi + printf "\n" diff --git a/scripts/storage/secondary/swift b/scripts/storage/secondary/swift index b6a17159c66c..b2aaa55abe66 100755 --- a/scripts/storage/secondary/swift +++ b/scripts/storage/secondary/swift @@ -335,7 +335,7 @@ def get_container(url, token, container, marker=None, limit=None, :param marker: marker query :param limit: limit query :param prefix: prefix query - :param delimeter: string to delimit the queries on + :param delimiter: string to delimit the queries on :param http_conn: HTTP connection object (If None, it will create the conn object) :param full_listing: if True, return a full listing, else returns a max diff --git a/scripts/vm/hypervisor/xenserver/swift b/scripts/vm/hypervisor/xenserver/swift index a9a5b0a88ce2..f1eb89370ba8 100755 --- a/scripts/vm/hypervisor/xenserver/swift +++ b/scripts/vm/hypervisor/xenserver/swift @@ -337,7 +337,7 @@ def get_container(url, token, container, marker=None, limit=None, :param marker: marker query :param limit: limit query :param prefix: prefix query - :param delimeter: string to delimit the queries on + :param delimiter: string to delimit the queries on :param http_conn: HTTP connection object (If None, it will create the conn object) :param full_listing: if True, return a full listing, else returns a max diff --git a/server/src/main/java/com/cloud/agent/manager/allocator/impl/FirstFitAllocator.java b/server/src/main/java/com/cloud/agent/manager/allocator/impl/FirstFitAllocator.java index 1e92a80a2805..36330d6685c6 100644 --- a/server/src/main/java/com/cloud/agent/manager/allocator/impl/FirstFitAllocator.java +++ b/server/src/main/java/com/cloud/agent/manager/allocator/impl/FirstFitAllocator.java @@ -27,6 +27,7 @@ import com.cloud.utils.exception.CloudRuntimeException; import org.apache.cloudstack.framework.config.dao.ConfigurationDao; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; import org.springframework.stereotype.Component; import com.cloud.agent.manager.allocator.HostAllocator; @@ -210,6 +211,10 @@ public List allocateTo(VirtualMachineProfile vmProfile, DeploymentPlan pla // add all hosts that we are not considering to the avoid list List allhostsInCluster = _hostDao.listAllUpAndEnabledNonHAHosts(type, clusterId, podId, dcId, null); allhostsInCluster.removeAll(clusterHosts); + + logger.debug(() -> String.format("Adding hosts [%s] to the avoid set because these hosts do not support HA.", + ReflectionToStringBuilderUtils.reflectOnlySelectedFields(allhostsInCluster, "uuid", "name"))); + for (HostVO host : allhostsInCluster) { avoid.addHost(host.getId()); } @@ -325,10 +330,8 @@ protected List allocateTo(DeploymentPlan plan, ServiceOffering offering, V //find number of guest VMs occupying capacity on this host. 
if (_capacityMgr.checkIfHostReachMaxGuestLimit(host)) { - if (logger.isDebugEnabled()) { - logger.debug("Host name: " + host.getName() + ", hostId: " + host.getId() + - " already has max Running VMs(count includes system VMs), skipping this and trying other available hosts"); - } + logger.debug(() -> String.format("Adding host [%s] to the avoid set because this host already has the max number of running (user and/or system) VMs.", + ReflectionToStringBuilderUtils.reflectOnlySelectedFields(host, "uuid", "name"))); avoid.addHost(host.getId()); continue; } @@ -337,7 +340,8 @@ protected List allocateTo(DeploymentPlan plan, ServiceOffering offering, V if ((offeringDetails = _serviceOfferingDetailsDao.findDetail(serviceOfferingId, GPU.Keys.vgpuType.toString())) != null) { ServiceOfferingDetailsVO groupName = _serviceOfferingDetailsDao.findDetail(serviceOfferingId, GPU.Keys.pciDevice.toString()); if(!_resourceMgr.isGPUDeviceAvailable(host.getId(), groupName.getValue(), offeringDetails.getValue())){ - logger.info("Host name: " + host.getName() + ", hostId: "+ host.getId() +" does not have required GPU devices available"); + logger.debug(String.format("Adding host [%s] to avoid set, because this host does not have required GPU devices available.", + ReflectionToStringBuilderUtils.reflectOnlySelectedFields(host, "uuid", "name"))); avoid.addHost(host.getId()); continue; } diff --git a/server/src/main/java/com/cloud/configuration/Config.java b/server/src/main/java/com/cloud/configuration/Config.java index 2d677042b62c..ef20d54aef57 100644 --- a/server/src/main/java/com/cloud/configuration/Config.java +++ b/server/src/main/java/com/cloud/configuration/Config.java @@ -959,7 +959,7 @@ public enum Config { ManagementServer.class, Integer.class, "network.loadbalancer.basiczone.elb.vm.ram.size", - "128", + "512", "Memory in MB for the elastic load balancer vm", null), ElasticLoadBalancerVmCpuMhz( @@ -1291,7 +1291,7 @@ public enum Config { "The allowable clock difference in milliseconds between when an SSO login request is made and when it is received.", null), //NetworkType("Hidden", ManagementServer.class, String.class, "network.type", "vlan", "The type of network that this deployment will use.", "vlan,direct"), - RouterRamSize("Hidden", NetworkOrchestrationService.class, Integer.class, "router.ram.size", "256", "Default RAM for router VM (in MB).", null), + RouterRamSize("Hidden", NetworkOrchestrationService.class, Integer.class, "router.ram.size", "512", "Default RAM for router VM (in MB).", null), DefaultPageSize("Advanced", ManagementServer.class, Long.class, "default.page.size", "500", "Default page size for API list* commands", null), diff --git a/server/src/main/java/com/cloud/configuration/ConfigurationManagerImpl.java b/server/src/main/java/com/cloud/configuration/ConfigurationManagerImpl.java index d5e0d013d30f..6ae92f892cf3 100644 --- a/server/src/main/java/com/cloud/configuration/ConfigurationManagerImpl.java +++ b/server/src/main/java/com/cloud/configuration/ConfigurationManagerImpl.java @@ -3917,22 +3917,9 @@ public DiskOffering updateDiskOffering(final UpdateDiskOfferingCmd cmd) { List existingZoneIds = diskOfferingDetailsDao.findZoneIds(diskOfferingId); Collections.sort(existingZoneIds); - // check if valid domain - if (CollectionUtils.isNotEmpty(domainIds)) { - for (final Long domainId: domainIds) { - if (_domainDao.findById(domainId) == null) { - throw new InvalidParameterValueException("Please specify a valid domain id"); - } - } - } + validateDomain(domainIds); - // check if valid zone 
- if (CollectionUtils.isNotEmpty(zoneIds)) { - for (Long zoneId : zoneIds) { - if (_zoneDao.findById(zoneId) == null) - throw new InvalidParameterValueException("Please specify a valid zone id"); - } - } + validateZone(zoneIds); Long userId = CallContext.current().getCallingUserId(); if (userId == null) { @@ -3955,35 +3942,16 @@ public DiskOffering updateDiskOffering(final UpdateDiskOfferingCmd cmd) { Collections.sort(filteredZoneIds); if (account.getType() == Account.Type.DOMAIN_ADMIN) { - if (!filteredZoneIds.equals(existingZoneIds)) { // Domain-admins cannot update zone(s) for offerings - throw new InvalidParameterValueException(String.format("Unable to update zone(s) for disk offering: %s by admin: %s as it is domain-admin", diskOfferingHandle.getUuid(), user.getUuid())); - } - if (existingDomainIds.isEmpty()) { - throw new InvalidParameterValueException(String.format("Unable to update public disk offering: %s by user: %s because it is domain-admin", diskOfferingHandle.getUuid(), user.getUuid())); - } else { - if (filteredDomainIds.isEmpty()) { - throw new InvalidParameterValueException(String.format("Unable to update disk offering: %s to a public offering by user: %s because it is domain-admin", diskOfferingHandle.getUuid(), user.getUuid())); - } - } + checkDomainAdminUpdateOfferingRestrictions(diskOfferingHandle, user, filteredZoneIds, existingZoneIds, existingDomainIds, filteredDomainIds); + if (StringUtils.isNotBlank(tags) && !ALLOW_DOMAIN_ADMINS_TO_CREATE_TAGGED_OFFERINGS.valueIn(account.getAccountId())) { throw new InvalidParameterValueException(String.format("User [%s] is unable to update disk offering tags.", user.getUuid())); } - List nonChildDomains = new ArrayList<>(); - for (Long domainId : existingDomainIds) { - if (!_domainDao.isChildDomain(account.getDomainId(), domainId)) { - if (name != null || displayText != null || sortKey != null) { // Domain-admins cannot update name, display text, sort key for offerings with domain which are not child domains for domain-admin - throw new InvalidParameterValueException(String.format("Unable to update disk offering: %s as it has linked domain(s) which are not child domain for domain-admin: %s", diskOfferingHandle.getUuid(), user.getUuid())); - } - nonChildDomains.add(domainId); - } - } - for (Long domainId : filteredDomainIds) { - if (!_domainDao.isChildDomain(account.getDomainId(), domainId)) { - Domain domain = _entityMgr.findById(Domain.class, domainId); - throw new InvalidParameterValueException(String.format("Unable to update disk offering: %s by domain-admin: %s with domain: %3$s which is not a child domain", diskOfferingHandle.getUuid(), user.getUuid(), domain.getUuid())); - } - } + List nonChildDomains = getAccountNonChildDomains(diskOfferingHandle, account, user, cmd, existingDomainIds); + + checkIfDomainIsChildDomain(diskOfferingHandle, account, user, filteredDomainIds); + filteredDomainIds.addAll(nonChildDomains); // Final list must include domains which were not child domain for domain-admin but specified for this offering prior to update } else if (account.getType() != Account.Type.ADMIN) { throw new InvalidParameterValueException(String.format("Unable to update disk offering: %s by id user: %s because it is not root-admin or domain-admin", diskOfferingHandle.getUuid(), user.getUuid())); @@ -3999,22 +3967,7 @@ public DiskOffering updateDiskOffering(final UpdateDiskOfferingCmd cmd) { } final DiskOfferingVO diskOffering = _diskOfferingDao.createForUpdate(diskOfferingId); - - if (name != null) { - 
diskOffering.setName(name); - } - - if (displayText != null) { - diskOffering.setDisplayText(displayText); - } - - if (sortKey != null) { - diskOffering.setSortKey(sortKey); - } - - if (displayDiskOffering != null) { - diskOffering.setDisplayOffering(displayDiskOffering); - } + updateDiskOfferingIfCmdAttributeNotNull(diskOffering, cmd); updateOfferingTagsIfIsNotNull(tags, diskOffering); @@ -4037,26 +3990,7 @@ public DiskOffering updateDiskOffering(final UpdateDiskOfferingCmd cmd) { } List<DiskOfferingDetailVO> detailsVO = new ArrayList<>(); if(detailsUpdateNeeded) { - SearchBuilder<DiskOfferingDetailVO> sb = diskOfferingDetailsDao.createSearchBuilder(); - sb.and("offeringId", sb.entity().getResourceId(), SearchCriteria.Op.EQ); - sb.and("detailName", sb.entity().getName(), SearchCriteria.Op.EQ); - sb.done(); - SearchCriteria<DiskOfferingDetailVO> sc = sb.create(); - sc.setParameters("offeringId", String.valueOf(diskOfferingId)); - if(!filteredDomainIds.equals(existingDomainIds)) { - sc.setParameters("detailName", ApiConstants.DOMAIN_ID); - diskOfferingDetailsDao.remove(sc); - for (Long domainId : filteredDomainIds) { - detailsVO.add(new DiskOfferingDetailVO(diskOfferingId, ApiConstants.DOMAIN_ID, String.valueOf(domainId), false)); - } - } - if(!filteredZoneIds.equals(existingZoneIds)) { - sc.setParameters("detailName", ApiConstants.ZONE_ID); - diskOfferingDetailsDao.remove(sc); - for (Long zoneId : filteredZoneIds) { - detailsVO.add(new DiskOfferingDetailVO(diskOfferingId, ApiConstants.ZONE_ID, String.valueOf(zoneId), false)); - } - } + updateDiskOfferingDetails(detailsVO, diskOfferingId, filteredDomainIds, existingDomainIds, filteredZoneIds, existingZoneIds); } if (!detailsVO.isEmpty()) { for (DiskOfferingDetailVO detailVO : detailsVO) { @@ -4067,6 +4001,128 @@ public DiskOffering updateDiskOffering(final UpdateDiskOfferingCmd cmd) { return _diskOfferingDao.findById(diskOfferingId); } + protected void validateDomain(List<Long> domainIds) { + if (CollectionUtils.isEmpty(domainIds)) { + return; + } + + for (final Long domainId: domainIds) { + if (_domainDao.findById(domainId) == null) { + throw new InvalidParameterValueException("Please specify a valid domain id."); + } + } + } + + protected void validateZone(List<Long> zoneIds) { + if (CollectionUtils.isEmpty(zoneIds)) { + return; + } + + for (Long zoneId : zoneIds) { + if (_zoneDao.findById(zoneId) == null) { + throw new InvalidParameterValueException("Please specify a valid zone id."); + } + } + } + + protected void updateDiskOfferingIfCmdAttributeNotNull(DiskOfferingVO diskOffering, UpdateDiskOfferingCmd cmd) { + if (cmd.getDiskOfferingName() != null) { + diskOffering.setName(cmd.getDiskOfferingName()); + } + + if (cmd.getDisplayText() != null) { + diskOffering.setDisplayText(cmd.getDisplayText()); + } + + if (cmd.getSortKey() != null) { + diskOffering.setSortKey(cmd.getSortKey()); + } + + if (cmd.getDisplayOffering() != null) { + diskOffering.setDisplayOffering(cmd.getDisplayOffering()); + } + } + + protected void updateDiskOfferingDetails(List<DiskOfferingDetailVO> detailsVO, Long diskOfferingId, List<Long> filteredDomainIds, + List<Long> existingDomainIds, List<Long> filteredZoneIds, List<Long> existingZoneIds) { + SearchBuilder<DiskOfferingDetailVO> sb = diskOfferingDetailsDao.createSearchBuilder(); + sb.and("offeringId", sb.entity().getResourceId(), SearchCriteria.Op.EQ); + sb.and("detailName", sb.entity().getName(), SearchCriteria.Op.EQ); + sb.done(); + SearchCriteria<DiskOfferingDetailVO> sc = sb.create(); + sc.setParameters("offeringId", String.valueOf(diskOfferingId)); + + updateDiskOfferingDetailsDomainIds(detailsVO, sc, diskOfferingId, filteredDomainIds, existingDomainIds); + 
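// Both detail updaters below replace their stored rows only when the filtered ID list differs from the existing one. +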
updateDiskOfferingDetailsZoneIds(detailsVO, sc, diskOfferingId, filteredZoneIds, existingZoneIds); + } + + protected void updateDiskOfferingDetailsDomainIds(List<DiskOfferingDetailVO> detailsVO, SearchCriteria<DiskOfferingDetailVO> sc, Long diskOfferingId, List<Long> filteredDomainIds, List<Long> existingDomainIds) { + if (filteredDomainIds.equals(existingDomainIds)) { + return; + } + + sc.setParameters("detailName", ApiConstants.DOMAIN_ID); + diskOfferingDetailsDao.remove(sc); + for (Long domainId : filteredDomainIds) { + detailsVO.add(new DiskOfferingDetailVO(diskOfferingId, ApiConstants.DOMAIN_ID, String.valueOf(domainId), false)); + } + } + + protected void updateDiskOfferingDetailsZoneIds(List<DiskOfferingDetailVO> detailsVO, SearchCriteria<DiskOfferingDetailVO> sc, Long diskOfferingId, List<Long> filteredZoneIds, List<Long> existingZoneIds) { + if (filteredZoneIds.equals(existingZoneIds)) { + return; + } + + sc.setParameters("detailName", ApiConstants.ZONE_ID); + diskOfferingDetailsDao.remove(sc); + for (Long zoneId : filteredZoneIds) { + detailsVO.add(new DiskOfferingDetailVO(diskOfferingId, ApiConstants.ZONE_ID, String.valueOf(zoneId), false)); + } + } + + protected void checkDomainAdminUpdateOfferingRestrictions(DiskOffering diskOffering, User user, List<Long> filteredZoneIds, List<Long> existingZoneIds, + List<Long> existingDomainIds, List<Long> filteredDomainIds) { + if (!filteredZoneIds.equals(existingZoneIds)) { + throw new InvalidParameterValueException(String.format("Unable to update zone(s) for disk offering [%s] by admin [%s] as it is domain-admin.", diskOffering.getUuid(), user.getUuid())); + } + if (existingDomainIds.isEmpty()) { + throw new InvalidParameterValueException(String.format("Unable to update public disk offering [%s] by user [%s] because it is domain-admin.", diskOffering.getUuid(), user.getUuid())); + } + if (filteredDomainIds.isEmpty()) { + throw new InvalidParameterValueException(String.format("Unable to update disk offering [%s] to a public offering by user [%s] because it is domain-admin.", diskOffering.getUuid(), user.getUuid())); + } + } + + protected List<Long> getAccountNonChildDomains(DiskOffering diskOffering, Account account, User user, + UpdateDiskOfferingCmd cmd, List<Long> existingDomainIds) { + List<Long> nonChildDomains = new ArrayList<>(); + String name = cmd.getDiskOfferingName(); + String displayText = cmd.getDisplayText(); + Integer sortKey = cmd.getSortKey(); + for (Long domainId : existingDomainIds) { + if (_domainDao.isChildDomain(account.getDomainId(), domainId)) { + continue; + } + + if (ObjectUtils.anyNotNull(name, displayText, sortKey)) { + throw new InvalidParameterValueException(String.format("Unable to update disk offering [%s] as it has linked domain(s) which are not child domain for domain-admin [%s].", diskOffering.getUuid(), user.getUuid())); + } + nonChildDomains.add(domainId); + } + return nonChildDomains; + } + + protected void checkIfDomainIsChildDomain(DiskOffering diskOffering, Account account, User user, List<Long> filteredDomainIds) { + for (Long domainId : filteredDomainIds) { + if (_domainDao.isChildDomain(account.getDomainId(), domainId)) { + continue; + } + + Domain domain = _entityMgr.findById(Domain.class, domainId); + throw new InvalidParameterValueException(String.format("Unable to update disk offering [%s] by domain-admin [%s] with domain [%3$s] which is not a child domain.", diskOffering.getUuid(), user.getUuid(), domain.getUuid())); + } + } + /** * Check the tags parameters to the disk/service offering *
diff --git a/server/src/main/java/com/cloud/deploy/DeploymentPlanningManagerImpl.java b/server/src/main/java/com/cloud/deploy/DeploymentPlanningManagerImpl.java index 41a51bc7af5f..d97fcef7453e 100644 --- a/server/src/main/java/com/cloud/deploy/DeploymentPlanningManagerImpl.java +++ b/server/src/main/java/com/cloud/deploy/DeploymentPlanningManagerImpl.java @@ -46,7 +46,9 @@ import org.apache.cloudstack.framework.config.ConfigKey; import org.apache.cloudstack.framework.config.Configurable; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; import org.apache.commons.collections.CollectionUtils; +import org.apache.commons.collections.MapUtils; import org.apache.commons.lang3.StringUtils; import org.apache.cloudstack.affinity.AffinityGroupProcessor; import org.apache.cloudstack.affinity.AffinityGroupService; @@ -128,6 +130,7 @@ import com.cloud.template.VirtualMachineTemplate; import com.cloud.user.AccountManager; import com.cloud.utils.DateUtil; +import com.cloud.utils.LogUtils; import com.cloud.utils.NumbersUtil; import com.cloud.utils.Pair; import com.cloud.utils.component.Manager; @@ -299,25 +302,30 @@ protected void avoidOtherClustersForDeploymentIfMigrationDisabled(VirtualMachine @Override public DeployDestination planDeployment(VirtualMachineProfile vmProfile, DeploymentPlan plan, ExcludeList avoids, DeploymentPlanner planner) throws InsufficientServerCapacityException, AffinityConflictException { + logger.debug(logDeploymentWithoutException(vmProfile.getVirtualMachine(), plan, avoids, planner)); ServiceOffering offering = vmProfile.getServiceOffering(); - int cpu_requested = offering.getCpu() * offering.getSpeed(); - long ram_requested = offering.getRamSize() * 1024L * 1024L; + int cpuRequested = offering.getCpu() * offering.getSpeed(); + long ramRequested = offering.getRamSize() * 1024L * 1024L; VirtualMachine vm = vmProfile.getVirtualMachine(); DataCenter dc = _dcDao.findById(vm.getDataCenterId()); boolean volumesRequireEncryption = anyVolumeRequiresEncryption(_volsDao.findByInstance(vm.getId())); if (vm.getType() == VirtualMachine.Type.User || vm.getType() == VirtualMachine.Type.DomainRouter) { + logger.debug("Checking non-dedicated resources to deploy VM [{}].", () -> ReflectionToStringBuilderUtils.reflectOnlySelectedFields(vm, "uuid", "type", "instanceName")); checkForNonDedicatedResources(vmProfile, dc, avoids); } - if (logger.isDebugEnabled()) { - logger.debug("DeploymentPlanner allocation algorithm: " + planner); - logger.debug("Trying to allocate a host and storage pools from dc:" + plan.getDataCenterId() + ", pod:" + plan.getPodId() + ",cluster:" + - plan.getClusterId() + ", requested cpu: " + cpu_requested + ", requested ram: " + toHumanReadableSize(ram_requested)); + logger.debug(() -> { + String datacenter = ReflectionToStringBuilderUtils.reflectOnlySelectedFields(dc, "uuid", "name"); + String podVO = ReflectionToStringBuilderUtils.reflectOnlySelectedFields(_podDao.findById(plan.getPodId()), "uuid", "name"); + String clusterVO = ReflectionToStringBuilderUtils.reflectOnlySelectedFields(_clusterDao.findById(plan.getClusterId()), "uuid", "name"); + String vmDetails = ReflectionToStringBuilderUtils.reflectOnlySelectedFields(vm, "uuid", "type", "instanceName"); + return String.format("Trying to allocate a host and storage pools from datacenter [%s], pod [%s], cluster [%s] to deploy VM [%s] " + "with requested CPU [%s] and requested RAM [%s].", datacenter, podVO, clusterVO, vmDetails, cpuRequested, 
toHumanReadableSize(ramRequested)); + }); - logger.debug("Is ROOT volume READY (pool already allocated)?: " + (plan.getPoolId() != null ? "Yes" : "No")); - } + logger.debug("ROOT volume [{}] {} to deploy VM [{}].", () -> getRootVolumeUuid(_volsDao.findByInstance(vm.getId())), () -> plan.getPoolId() != null ? "is ready" : "is not ready", vm::getUuid); avoidDisabledResources(vmProfile, dc, avoids); @@ -325,81 +333,7 @@ public DeployDestination planDeployment(VirtualMachineProfile vmProfile, Deploym String uefiFlag = (String)vmProfile.getParameter(VirtualMachineProfile.Param.UefiFlag); if (plan.getHostId() != null && haVmTag == null) { - Long hostIdSpecified = plan.getHostId(); - if (logger.isDebugEnabled()) { - logger.debug("DeploymentPlan has host_id specified, choosing this host: " + hostIdSpecified); - } - HostVO host = _hostDao.findById(hostIdSpecified); - if (host != null && StringUtils.isNotBlank(uefiFlag) && "yes".equalsIgnoreCase(uefiFlag)) { - DetailVO uefiHostDetail = _hostDetailsDao.findDetail(host.getId(), Host.HOST_UEFI_ENABLE); - if (uefiHostDetail == null || "false".equalsIgnoreCase(uefiHostDetail.getValue())) { - logger.debug("Cannot deploy to specified host as host does n't support uefi vm deployment, returning."); - return null; - - } - } - if (host == null) { - logger.debug("The specified host cannot be found"); - } else if (avoids.shouldAvoid(host)) { - logger.debug("The specified host is in avoid set"); - } else { - if (logger.isDebugEnabled()) { - logger.debug( - "Looking for suitable pools for this host under zone: " + host.getDataCenterId() + ", pod: " + host.getPodId() + ", cluster: " + host.getClusterId()); - } - - Pod pod = _podDao.findById(host.getPodId()); - - Cluster cluster = _clusterDao.findById(host.getClusterId()); - - boolean displayStorage = getDisplayStorageFromVmProfile(vmProfile); - if (vm.getHypervisorType() == HypervisorType.BareMetal) { - DeployDestination dest = new DeployDestination(dc, pod, cluster, host, new HashMap(), displayStorage); - logger.debug("Returning Deployment Destination: " + dest); - return dest; - } - - // search for storage under the zone, pod, cluster of the host. 
- DataCenterDeployment lastPlan = - new DataCenterDeployment(host.getDataCenterId(), host.getPodId(), host.getClusterId(), hostIdSpecified, plan.getPoolId(), null, - plan.getReservationContext()); - - Pair>, List> result = findSuitablePoolsForVolumes(vmProfile, lastPlan, avoids, HostAllocator.RETURN_UPTO_ALL); - Map> suitableVolumeStoragePools = result.first(); - List readyAndReusedVolumes = result.second(); - - _hostDao.loadDetails(host); - if (volumesRequireEncryption && !Boolean.parseBoolean(host.getDetail(Host.HOST_VOLUME_ENCRYPTION))) { - logger.warn(String.format("VM's volumes require encryption support, and provided host %s can't handle it", host)); - return null; - } else { - logger.debug(String.format("Volume encryption requirements are met by provided host %s", host)); - } - - // choose the potential pool for this VM for this host - if (!suitableVolumeStoragePools.isEmpty()) { - List suitableHosts = new ArrayList(); - suitableHosts.add(host); - Pair> potentialResources = findPotentialDeploymentResources( - suitableHosts, suitableVolumeStoragePools, avoids, - getPlannerUsage(planner, vmProfile, plan, avoids), readyAndReusedVolumes, plan.getPreferredHosts(), vm); - if (potentialResources != null) { - pod = _podDao.findById(host.getPodId()); - cluster = _clusterDao.findById(host.getClusterId()); - Map storageVolMap = potentialResources.second(); - // remove the reused vol<->pool from destination, since - // we don't have to prepare this volume. - for (Volume vol : readyAndReusedVolumes) { - storageVolMap.remove(vol); - } - DeployDestination dest = new DeployDestination(dc, pod, cluster, host, storageVolMap, displayStorage); - logger.debug("Returning Deployment Destination: " + dest); - return dest; - } - } - } - logger.debug("Cannot deploy to specified host, returning."); - return null; + return deployInSpecifiedHostWithoutHA(vmProfile, plan, avoids, planner, vm, dc, uefiFlag); } // call affinitygroup chain @@ -410,11 +344,14 @@ public DeployDestination planDeployment(VirtualMachineProfile vmProfile, Deploym processor.process(vmProfile, plan, avoids); } } + logger.debug("DeploymentPlan [{}] has no host specified. 
Trying to find another destination to deploy VM [{}], avoiding pods [{}], clusters [{}] and hosts [{}].", + () -> plan.getClass().getSimpleName(), vmProfile::getUuid, () -> StringUtils.join(avoids.getPodsToAvoid(), ", "), () -> StringUtils.join(avoids.getClustersToAvoid(), ", "), + () -> StringUtils.join(avoids.getHostsToAvoid(), ", ")); + + + logger.debug("Deploy avoids pods: {}, clusters: {}, hosts: {}.", avoids.getPodsToAvoid(), avoids.getClustersToAvoid(), avoids.getHostsToAvoid()); + logger.debug("Deploy hosts with priorities {}; hosts have NORMAL priority by default.", plan.getHostPriorities()); - if (logger.isDebugEnabled()) { - logger.debug("Deploy avoids pods: " + avoids.getPodsToAvoid() + ", clusters: " + avoids.getClustersToAvoid() + ", hosts: " + avoids.getHostsToAvoid()); - logger.debug("Deploy hosts with priorities " + plan.getHostPriorities() + " , hosts have NORMAL priority by default"); - } // call planners // DataCenter dc = _dcDao.findById(vm.getDataCenterId()); @@ -448,108 +385,11 @@ public DeployDestination planDeployment(VirtualMachineProfile vmProfile, Deploym HostVO host = _hostDao.findById(vm.getLastHostId()); lastHost = host; - _hostDao.loadHostTags(host); - _hostDao.loadDetails(host); - ServiceOfferingDetailsVO offeringDetails = null; - if (host == null) { - logger.debug("The last host of this VM cannot be found"); - } else if (avoids.shouldAvoid(host)) { - logger.debug("The last host of this VM is in avoid set"); - } else if (plan.getClusterId() != null && host.getClusterId() != null - && !plan.getClusterId().equals(host.getClusterId())) { - logger.debug("The last host of this VM cannot be picked as the plan specifies different clusterId: " - + plan.getClusterId()); - } else if (_capacityMgr.checkIfHostReachMaxGuestLimit(host)) { - logger.debug("The last Host, hostId: " + host.getId() + - " already has max Running VMs(count includes system VMs), skipping this and trying other available hosts"); - } else if ((offeringDetails = _serviceOfferingDetailsDao.findDetail(offering.getId(), GPU.Keys.vgpuType.toString())) != null) { - ServiceOfferingDetailsVO groupName = _serviceOfferingDetailsDao.findDetail(offering.getId(), GPU.Keys.pciDevice.toString()); - if(!_resourceMgr.isGPUDeviceAvailable(host.getId(), groupName.getValue(), offeringDetails.getValue())){ - logger.debug("The last host of this VM does not have required GPU devices available"); - } - } else if (volumesRequireEncryption && !Boolean.parseBoolean(host.getDetail(Host.HOST_VOLUME_ENCRYPTION))) { - logger.warn(String.format("The last host of this VM %s does not support volume encryption, which is required by this VM.", host)); - } else { - if (host.getStatus() == Status.Up) { - if (checkVmProfileAndHost(vmProfile, host)) { - long cluster_id = host.getClusterId(); - ClusterDetailsVO cluster_detail_cpu = _clusterDetailsDao.findDetail(cluster_id, - "cpuOvercommitRatio"); - ClusterDetailsVO cluster_detail_ram = _clusterDetailsDao.findDetail(cluster_id, - "memoryOvercommitRatio"); - Float cpuOvercommitRatio = Float.parseFloat(cluster_detail_cpu.getValue()); - Float memoryOvercommitRatio = Float.parseFloat(cluster_detail_ram.getValue()); - - boolean hostHasCpuCapability, hostHasCapacity = false; - hostHasCpuCapability = _capacityMgr.checkIfHostHasCpuCapability(host.getId(), offering.getCpu(), offering.getSpeed()); - - if (hostHasCpuCapability) { - // first check from reserved capacity - hostHasCapacity = _capacityMgr.checkIfHostHasCapacity(host.getId(), cpu_requested, ram_requested, true, cpuOvercommitRatio, 
memoryOvercommitRatio, true); - - // if not reserved, check the free capacity - if (!hostHasCapacity) - hostHasCapacity = _capacityMgr.checkIfHostHasCapacity(host.getId(), cpu_requested, ram_requested, false, cpuOvercommitRatio, memoryOvercommitRatio, true); - } - - boolean displayStorage = getDisplayStorageFromVmProfile(vmProfile); - if (hostHasCapacity - && hostHasCpuCapability) { - logger.debug("The last host of this VM is UP and has enough capacity"); - logger.debug("Now checking for suitable pools under zone: " + host.getDataCenterId() - + ", pod: " + host.getPodId() + ", cluster: " + host.getClusterId()); - - Pod pod = _podDao.findById(host.getPodId()); - Cluster cluster = _clusterDao.findById(host.getClusterId()); - if (vm.getHypervisorType() == HypervisorType.BareMetal) { - DeployDestination dest = new DeployDestination(dc, pod, cluster, host, new HashMap(), displayStorage); - logger.debug("Returning Deployment Destination: " + dest); - return dest; - } - // search for storage under the zone, pod, cluster - // of - // the last host. - DataCenterDeployment lastPlan = new DataCenterDeployment(host.getDataCenterId(), - host.getPodId(), host.getClusterId(), host.getId(), plan.getPoolId(), null); - Pair>, List> result = findSuitablePoolsForVolumes( - vmProfile, lastPlan, avoids, HostAllocator.RETURN_UPTO_ALL); - Map> suitableVolumeStoragePools = result.first(); - List readyAndReusedVolumes = result.second(); - - // choose the potential pool for this VM for this - // host - if (!suitableVolumeStoragePools.isEmpty()) { - List suitableHosts = new ArrayList(); - suitableHosts.add(host); - Pair> potentialResources = findPotentialDeploymentResources( - suitableHosts, suitableVolumeStoragePools, avoids, - getPlannerUsage(planner, vmProfile, plan, avoids), readyAndReusedVolumes, plan.getPreferredHosts(), vm); - if (potentialResources != null) { - Map storageVolMap = potentialResources.second(); - // remove the reused vol<->pool from - // destination, since we don't have to - // prepare - // this volume. 
- for (Volume vol : readyAndReusedVolumes) { - storageVolMap.remove(vol); - } - DeployDestination dest = new DeployDestination(dc, pod, cluster, host, - storageVolMap, displayStorage); - logger.debug("Returning Deployment Destination: " + dest); - return dest; - } - } - } else { - logger.debug("The last host of this VM does not have enough capacity"); - } - } - } else { - logger.debug("The last host of this VM is not UP or is not enabled, host status is: " + host.getStatus().name() + ", host resource state is: " + - host.getResourceState()); - } + DeployDestination deployDestination = deployInVmLastHost(vmProfile, plan, avoids, planner, vm, dc, offering, cpuRequested, ramRequested, volumesRequireEncryption); + if (deployDestination != null) { + return deployDestination; } - logger.debug("Cannot choose the last host to deploy this VM "); } avoidOtherClustersForDeploymentIfMigrationDisabled(vm, lastHost, avoids); @@ -614,6 +454,208 @@ public DeployDestination planDeployment(VirtualMachineProfile vmProfile, Deploym return dest; } + private DeployDestination deployInVmLastHost(VirtualMachineProfile vmProfile, DeploymentPlan plan, ExcludeList avoids, + DeploymentPlanner planner, VirtualMachine vm, DataCenter dc, ServiceOffering offering, int cpuRequested, long ramRequested, + boolean volumesRequireEncryption) throws InsufficientServerCapacityException { + HostVO host = _hostDao.findById(vm.getLastHostId()); + _hostDao.loadHostTags(host); + _hostDao.loadDetails(host); + + if (canUseLastHost(host, avoids, plan, vm, offering, volumesRequireEncryption)) { + if (host.getStatus() != Status.Up) { + logger.debug("Cannot deploy VM [{}] to the last host [{}] because this host is not in UP state or is not enabled. Host current status [{}] and resource status [{}].", + vm.getUuid(), host.getUuid(), host.getState().name(), host.getResourceState()); + return null; + } + if (checkVmProfileAndHost(vmProfile, host)) { + long cluster_id = host.getClusterId(); + ClusterDetailsVO cluster_detail_cpu = _clusterDetailsDao.findDetail(cluster_id, "cpuOvercommitRatio"); + ClusterDetailsVO cluster_detail_ram = _clusterDetailsDao.findDetail(cluster_id, "memoryOvercommitRatio"); + float cpuOvercommitRatio = Float.parseFloat(cluster_detail_cpu.getValue()); + float memoryOvercommitRatio = Float.parseFloat(cluster_detail_ram.getValue()); + + boolean hostHasCpuCapability, hostHasCapacity = false; + hostHasCpuCapability = _capacityMgr.checkIfHostHasCpuCapability(host.getId(), offering.getCpu(), offering.getSpeed()); + + if (hostHasCpuCapability) { + // first check from reserved capacity + hostHasCapacity = _capacityMgr.checkIfHostHasCapacity(host.getId(), cpuRequested, ramRequested, true, cpuOvercommitRatio, memoryOvercommitRatio, true); + + // if not reserved, check the free capacity + if (!hostHasCapacity) + hostHasCapacity = _capacityMgr.checkIfHostHasCapacity(host.getId(), cpuRequested, ramRequested, false, cpuOvercommitRatio, memoryOvercommitRatio, true); + } + + boolean displayStorage = getDisplayStorageFromVmProfile(vmProfile); + if (!hostHasCapacity || !hostHasCpuCapability) { + logger.debug("Cannot deploy VM [{}] to the last host [{}] because this host does not have enough capacity to deploy this VM.", vm.getUuid(), host.getUuid()); + return null; + } + logger.debug("Last host [{}] of VM [{}] is UP and has enough capacity. 
Checking for suitable pools for this host under zone [{}], pod [{}] and cluster [{}].", + host.getUuid(), vm.getUuid(), host.getDataCenterId(), host.getPodId(), host.getClusterId()); + + Pod pod = _podDao.findById(host.getPodId()); + Cluster cluster = _clusterDao.findById(host.getClusterId()); + if (vm.getHypervisorType() == HypervisorType.BareMetal) { + DeployDestination dest = new DeployDestination(dc, pod, cluster, host, new HashMap(), displayStorage); + logger.debug("Returning Deployment Destination: {}.", dest); + return dest; + } + + // search for storage under the zone, pod, cluster + // of + // the last host. + DataCenterDeployment lastPlan = new DataCenterDeployment(host.getDataCenterId(), + host.getPodId(), host.getClusterId(), host.getId(), plan.getPoolId(), null); + Pair>, List> result = findSuitablePoolsForVolumes( + vmProfile, lastPlan, avoids, HostAllocator.RETURN_UPTO_ALL); + Map> suitableVolumeStoragePools = result.first(); + List readyAndReusedVolumes = result.second(); + + // choose the potential pool for this VM for this + // host + if (suitableVolumeStoragePools.isEmpty()) { + logger.debug("Cannot find suitable storage pools in host [{}] to deploy VM [{}]", host.getUuid(), vm.getUuid()); + return null; + } + List suitableHosts = new ArrayList(); + suitableHosts.add(host); + Pair> potentialResources = findPotentialDeploymentResources( + suitableHosts, suitableVolumeStoragePools, avoids, + getPlannerUsage(planner, vmProfile, plan, avoids), readyAndReusedVolumes, plan.getPreferredHosts(), vm); + if (potentialResources != null) { + Map storageVolMap = potentialResources.second(); + // remove the reused vol<->pool from + // destination, since we don't have to + // prepare + // this volume. + for (Volume vol : readyAndReusedVolumes) { + storageVolMap.remove(vol); + } + DeployDestination dest = new DeployDestination(dc, pod, cluster, host, storageVolMap, displayStorage); + logger.debug("Returning Deployment Destination: {}", dest); + return dest; + } + } + } + logger.debug("Cannot choose the last host to deploy this VM {}.", vm); + return null; + } + + private boolean canUseLastHost(HostVO host, ExcludeList avoids, DeploymentPlan plan, VirtualMachine vm, ServiceOffering offering, boolean volumesRequireEncryption) { + if (host == null) { + logger.warn("Could not find last host of VM [{}] with id [{}]. Skipping this and trying other available hosts.", vm.getUuid(), vm.getLastHostId()); + return false; + } + + if (avoids.shouldAvoid(host)) { + logger.warn("The last host [{}] of VM [{}] is in the avoid set. Skipping this and trying other available hosts.", host.getUuid(), vm.getUuid()); + return false; + } + + if (plan.getClusterId() != null && host.getClusterId() != null && !plan.getClusterId().equals(host.getClusterId())) { + logger.debug(() -> String.format("The last host [%s] of VM [%s] cannot be picked, as the plan [%s] specifies a different cluster [%s] to deploy this VM. Skipping this and trying other available hosts.", + ReflectionToStringBuilderUtils.reflectOnlySelectedFields(host, "uuid", "clusterId"), vm.getUuid(), plan.getClass().getSimpleName(), plan.getClusterId())); + return false; + } + + if (_capacityMgr.checkIfHostReachMaxGuestLimit(host)) { + logger.debug("Cannot deploy VM [{}] in the last host [{}] because this host already has the max number of running VMs (users and system VMs). 
Skipping this and trying other available hosts.", + vm.getUuid(), host.getUuid()); + return false; + } + + ServiceOfferingDetailsVO offeringDetails = _serviceOfferingDetailsDao.findDetail(offering.getId(), GPU.Keys.vgpuType.toString()); + ServiceOfferingDetailsVO groupName = _serviceOfferingDetailsDao.findDetail(offering.getId(), GPU.Keys.pciDevice.toString()); + if (offeringDetails != null && !_resourceMgr.isGPUDeviceAvailable(host.getId(), groupName.getValue(), offeringDetails.getValue())) { + logger.debug("Cannot deploy VM [{}] in the last host [{}] because this host does not have the required GPU devices available. Skipping this and trying other available hosts.", + vm.getUuid(), host.getUuid()); + return false; + } + + if (volumesRequireEncryption && !Boolean.parseBoolean(host.getDetail(Host.HOST_VOLUME_ENCRYPTION))) { + logger.warn("The last host of this VM {} does not support volume encryption, which is required by this VM.", host); + return false; + } + return true; + } + + private DeployDestination deployInSpecifiedHostWithoutHA(VirtualMachineProfile vmProfile, DeploymentPlan plan, ExcludeList avoids, + DeploymentPlanner planner, VirtualMachine vm, DataCenter dc, String uefiFlag) + throws InsufficientServerCapacityException { + Long hostIdSpecified = plan.getHostId(); + logger.debug("DeploymentPlan [{}] has specified host [{}] without HA flag. Choosing this host to deploy VM [{}].", plan.getClass().getSimpleName(), hostIdSpecified, vm.getUuid()); + + HostVO host = _hostDao.findById(hostIdSpecified); + if (host != null && StringUtils.isNotBlank(uefiFlag) && "yes".equalsIgnoreCase(uefiFlag)) { + _hostDao.loadDetails(host); + if (MapUtils.isNotEmpty(host.getDetails()) && host.getDetails().containsKey(Host.HOST_UEFI_ENABLE) && "false".equalsIgnoreCase(host.getDetails().get(Host.HOST_UEFI_ENABLE))) { + logger.debug("Cannot deploy VM [{}] to specified host [{}] because this host does not support UEFI VM deployment, returning.", vm.getUuid(), host.getUuid()); + return null; + } + } + if (host == null) { + logger.debug("Cannot deploy VM [{}] to host [{}] because this host cannot be found.", vm.getUuid(), hostIdSpecified); + return null; + } + if (avoids.shouldAvoid(host)) { + logger.debug("Cannot deploy VM [{}] to host [{}] because this host is in the avoid set.", vm.getUuid(), host.getUuid()); + return null; + } + + logger.debug("Trying to find suitable pools for host [{}] under pod [{}], cluster [{}] and zone [{}], to deploy VM [{}].", + host.getUuid(), host.getDataCenterId(), host.getPodId(), host.getClusterId(), vm.getUuid()); + + Pod pod = _podDao.findById(host.getPodId()); + Cluster cluster = _clusterDao.findById(host.getClusterId()); + + boolean displayStorage = getDisplayStorageFromVmProfile(vmProfile); + if (vm.getHypervisorType() == HypervisorType.BareMetal) { + DeployDestination dest = new DeployDestination(dc, pod, cluster, host, new HashMap(), + displayStorage); + logger.debug("Returning Deployment Destination: {}.", dest); + return dest; + } + + DataCenterDeployment lastPlan = new DataCenterDeployment(host.getDataCenterId(), host.getPodId(), + host.getClusterId(), hostIdSpecified, plan.getPoolId(), null, plan.getReservationContext()); + + Pair>, List> result = findSuitablePoolsForVolumes(vmProfile, lastPlan, + avoids, HostAllocator.RETURN_UPTO_ALL); + Map> suitableVolumeStoragePools = result.first(); + List readyAndReusedVolumes = result.second(); + + if (!suitableVolumeStoragePools.isEmpty()) { + List suitableHosts = new ArrayList(); + suitableHosts.add(host); + Pair> 
potentialResources = findPotentialDeploymentResources(suitableHosts, + suitableVolumeStoragePools, avoids, getPlannerUsage(planner, vmProfile, plan, avoids), + readyAndReusedVolumes, plan.getPreferredHosts(), vm); + if (potentialResources != null) { + pod = _podDao.findById(host.getPodId()); + cluster = _clusterDao.findById(host.getClusterId()); + Map storageVolMap = potentialResources.second(); + for (Volume vol : readyAndReusedVolumes) { + storageVolMap.remove(vol); + } + DeployDestination dest = new DeployDestination(dc, pod, cluster, host, storageVolMap, displayStorage); + logger.debug("Returning Deployment Destination: {}", dest); + return dest; + } + } + logger.debug("Cannot deploy VM [{}] under host [{}], because no suitable pools were found.", vmProfile.getUuid(), host.getUuid()); + return null; + } + + protected String getRootVolumeUuid(List volumes) { + for (Volume volume : volumes) { + if (volume.getVolumeType() == Volume.Type.ROOT) { + return volume.getUuid(); + } + } + return null; + } + protected boolean anyVolumeRequiresEncryption(List volumes) { for (Volume volume : volumes) { if (volume.getPassphraseId() != null) { @@ -636,32 +678,29 @@ private boolean getDisplayStorageFromVmProfile(VirtualMachineProfile vmProfile) return vmProfile == null || vmProfile.getTemplate() == null || !vmProfile.getTemplate().isDeployAsIs(); } - /** - * Adds disabled resources (Data centers, Pods, Clusters, and hosts) to exclude list (avoid) in case of disabled state. - */ - public void avoidDisabledResources(VirtualMachineProfile vmProfile, DataCenter dc, ExcludeList avoids) { - if (vmProfile.getType().isUsedBySystem() && isRouterDeployableInDisabledResources()) { - return; - } - - VMInstanceVO vm = _vmInstanceDao.findById(vmProfile.getId()); - AccountVO owner = accountDao.findById(vm.getAccountId()); - boolean isOwnerRoleIdAdmin = false; - - if (owner != null && owner.getRoleId() != null && owner.getRoleId() == ADMIN_ACCOUNT_ROLE_ID) { - isOwnerRoleIdAdmin = true; - } + /** + * Adds disabled resources (Data centers, Pods, Clusters, and hosts) to exclude + * list (avoid) in case of disabled state. + */ + public void avoidDisabledResources(VirtualMachineProfile vmProfile, DataCenter dc, ExcludeList avoids) { + if (vmProfile.getType().isUsedBySystem() && isRouterDeployableInDisabledResources()) { + return; + } - if (isOwnerRoleIdAdmin && isAdminVmDeployableInDisabledResources()) { - return; - } + VMInstanceVO vm = _vmInstanceDao.findById(vmProfile.getId()); + AccountVO owner = accountDao.findById(vm.getAccountId()); + boolean isOwnerRoleIdAdmin = owner != null && owner.getRoleId() != null && owner.getRoleId() == ADMIN_ACCOUNT_ROLE_ID; - avoidDisabledDataCenters(dc, avoids); - avoidDisabledPods(dc, avoids); - avoidDisabledClusters(dc, avoids); - avoidDisabledHosts(dc, avoids); + if (isOwnerRoleIdAdmin && isAdminVmDeployableInDisabledResources()) { + return; } + avoidDisabledDataCenters(dc, avoids); + avoidDisabledPods(dc, avoids); + avoidDisabledClusters(dc, avoids); + avoidDisabledHosts(dc, avoids); + } + /** * Returns the value of the ConfigKey 'allow.router.on.disabled.resources'. * @note this method allows mocking and testing with the respective ConfigKey parameter. 
@@ -683,6 +722,8 @@ protected boolean isAdminVmDeployableInDisabledResources() { */ protected void avoidDisabledHosts(DataCenter dc, ExcludeList avoids) { List disabledHosts = _hostDao.listDisabledByDataCenterId(dc.getId()); + logger.debug(() -> String.format("Adding hosts [%s] of datacenter [%s] to the avoid set because these hosts are in the Disabled state.", + disabledHosts.stream().map(HostVO::getUuid).collect(Collectors.joining(", ")), dc.getUuid())); for (HostVO host : disabledHosts) { avoids.addHost(host.getId()); } @@ -695,6 +736,7 @@ protected void avoidDisabledClusters(DataCenter dc, ExcludeList avoids) { List pods = _podDao.listAllPods(dc.getId()); for (Long podId : pods) { List disabledClusters = _clusterDao.listDisabledClusters(dc.getId(), podId); + logger.debug(() -> String.format("Adding clusters [%s] of pod [%s] to the avoid set because these clusters are in the Disabled state.", StringUtils.join(disabledClusters, ", "), podId)); avoids.addClusterList(disabledClusters); } } @@ -704,6 +746,7 @@ protected void avoidDisabledClusters(DataCenter dc, ExcludeList avoids) { */ protected void avoidDisabledPods(DataCenter dc, ExcludeList avoids) { List disabledPods = _podDao.listDisabledPods(dc.getId()); + logger.debug(() -> String.format("Adding pods [%s] to the avoid set because these pods are in the Disabled state.", StringUtils.join(disabledPods, ", "))); avoids.addPodList(disabledPods); } @@ -712,6 +755,7 @@ protected void avoidDisabledPods(DataCenter dc, ExcludeList avoids) { */ protected void avoidDisabledDataCenters(DataCenter dc, ExcludeList avoids) { if (dc.getAllocationState() == Grouping.AllocationState.Disabled) { + logger.debug("Adding datacenter [{}] to the avoid set because this datacenter is in the Disabled state.", dc.getUuid()); avoids.addDataCenter(dc.getId()); } } @@ -765,6 +809,8 @@ public void checkForNonDedicatedResources(VirtualMachineProfile vmProfile, DataC if (dedicatedZone != null && !_accountMgr.isRootAdmin(vmProfile.getOwner().getId())) { long accountDomainId = vmProfile.getOwner().getDomainId(); long accountId = vmProfile.getOwner().getAccountId(); + logger.debug("Zone [{}] is dedicated. Checking if account [{}] in domain [{}] can use this zone to deploy VM [{}].", + dedicatedZone.getUuid(), accountId, accountDomainId, vmProfile.getUuid()); // If a zone is dedicated to an account then all hosts in this zone // will be explicitly dedicated to @@ -784,7 +830,6 @@ public void checkForNonDedicatedResources(VirtualMachineProfile vmProfile, DataC if (!_affinityGroupService.isAffinityGroupAvailableInDomain(dedicatedZone.getAffinityGroupId(), accountDomainId)) { throw new CloudRuntimeException("Failed to deploy VM, Zone " + dc.getName() + " not available for the user domain " + vmProfile.getOwner()); } - } // check affinity group of type Explicit dedication exists. If No put @@ -809,109 +854,97 @@ public void checkForNonDedicatedResources(VirtualMachineProfile vmProfile, DataC //Only when the type is instance VM and not explicitly dedicated. 
if (vm.getType() == VirtualMachine.Type.User && !isExplicit) { - //add explicitly dedicated resources in avoidList - if (logger.isDebugEnabled()) { - logger.debug("Adding pods to avoid lists for non-explicit VM deployment: " + allPodsInDc); - } - avoids.addPodList(allPodsInDc); - if (logger.isDebugEnabled()) { - logger.debug("Adding clusters to avoid lists for non-explicit VM deployment: " + allClustersInDc); - } - avoids.addClusterList(allClustersInDc); - if (logger.isDebugEnabled()) { - logger.debug("Adding hosts to avoid lists for non-explicit VM deployment: " + allHostsInDc); - } - avoids.addHostList(allHostsInDc); + findAvoidSetForNonExplicitUserVM(avoids, vm, allPodsInDc, allClustersInDc, allHostsInDc); } //Handle the Virtual Router Case //No need to check the isExplicit. As both the cases are handled. if (vm.getType() == VirtualMachine.Type.DomainRouter) { - long vmAccountId = vm.getAccountId(); - long vmDomainId = vm.getDomainId(); - - //Lists all explicitly dedicated resources from vm account ID or domain ID. - List allPodsFromDedicatedID = new ArrayList(); - List allClustersFromDedicatedID = new ArrayList(); - List allHostsFromDedicatedID = new ArrayList(); + findAvoidSetForRouterVM(avoids, vm, allPodsInDc, allClustersInDc, allHostsInDc); + } + } - //Whether the dedicated resources belong to Domain or not. If not, it may belongs to Account or no dedication. - List domainGroupMappings = _affinityGroupDomainMapDao.listByDomain(vmDomainId); + private void findAvoidSetForRouterVM(ExcludeList avoids, VirtualMachine vm, List allPodsInDc, List allClustersInDc, List allHostsInDc) { + long vmAccountId = vm.getAccountId(); + long vmDomainId = vm.getDomainId(); - //For temporary storage and indexing. - List tempStorage; + List allPodsFromDedicatedID = new ArrayList(); + List allClustersFromDedicatedID = new ArrayList(); + List allHostsFromDedicatedID = new ArrayList(); - if (domainGroupMappings == null || domainGroupMappings.isEmpty()) { - //The dedicated resource belongs to VM Account ID. 
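+ // If no domain-level affinity group mapping exists, the dedicated resources are looked up by the VM's account; otherwise they are looked up by the VM's domain only.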
+ List domainGroupMappings = _affinityGroupDomainMapDao.listByDomain(vmDomainId); - tempStorage = _dedicatedDao.searchDedicatedPods(null, vmDomainId, vmAccountId, null, new Filter(DedicatedResourceVO.class, "id", true, 0L, 1L)).first(); + List tempStorage; - for(DedicatedResourceVO vo : tempStorage) { - allPodsFromDedicatedID.add(vo.getPodId()); - } + if (domainGroupMappings == null || domainGroupMappings.isEmpty()) { + tempStorage = _dedicatedDao.searchDedicatedPods(null, vmDomainId, vmAccountId, null, + new Filter(DedicatedResourceVO.class, "id", true, 0L, 1L)).first(); - tempStorage.clear(); - tempStorage = _dedicatedDao.searchDedicatedClusters(null, vmDomainId, vmAccountId, null, new Filter(DedicatedResourceVO.class, "id", true, 0L, 1L)).first(); + for (DedicatedResourceVO vo : tempStorage) { + allPodsFromDedicatedID.add(vo.getPodId()); + } - for(DedicatedResourceVO vo : tempStorage) { - allClustersFromDedicatedID.add(vo.getClusterId()); - } + tempStorage.clear(); + tempStorage = _dedicatedDao.searchDedicatedClusters(null, vmDomainId, vmAccountId, null, + new Filter(DedicatedResourceVO.class, "id", true, 0L, 1L)).first(); - tempStorage.clear(); - tempStorage = _dedicatedDao.searchDedicatedHosts(null, vmDomainId, vmAccountId, null, new Filter(DedicatedResourceVO.class, "id", true, 0L, 1L)).first(); + for (DedicatedResourceVO vo : tempStorage) { + allClustersFromDedicatedID.add(vo.getClusterId()); + } - for(DedicatedResourceVO vo : tempStorage) { - allHostsFromDedicatedID.add(vo.getHostId()); - } + tempStorage.clear(); + tempStorage = _dedicatedDao.searchDedicatedHosts(null, vmDomainId, vmAccountId, null, + new Filter(DedicatedResourceVO.class, "id", true, 0L, 1L)).first(); - //Remove the dedicated ones from main list - allPodsInDc.removeAll(allPodsFromDedicatedID); - allClustersInDc.removeAll(allClustersFromDedicatedID); - allHostsInDc.removeAll(allHostsFromDedicatedID); + for (DedicatedResourceVO vo : tempStorage) { + allHostsFromDedicatedID.add(vo.getHostId()); } - else { - //The dedicated resource belongs to VM Domain ID or No dedication. 
- - tempStorage = _dedicatedDao.searchDedicatedPods(null, vmDomainId, null, null, new Filter(DedicatedResourceVO.class, "id", true, 0L, 1L)).first(); - for(DedicatedResourceVO vo : tempStorage) { - allPodsFromDedicatedID.add(vo.getPodId()); - } + allPodsInDc.removeAll(allPodsFromDedicatedID); + allClustersInDc.removeAll(allClustersFromDedicatedID); + allHostsInDc.removeAll(allHostsFromDedicatedID); + } else { + tempStorage = _dedicatedDao.searchDedicatedPods(null, vmDomainId, null, null, + new Filter(DedicatedResourceVO.class, "id", true, 0L, 1L)).first(); - tempStorage.clear(); - tempStorage = _dedicatedDao.searchDedicatedClusters(null, vmDomainId, null, null, new Filter(DedicatedResourceVO.class, "id", true, 0L, 1L)).first(); + for (DedicatedResourceVO vo : tempStorage) { + allPodsFromDedicatedID.add(vo.getPodId()); + } - for(DedicatedResourceVO vo : tempStorage) { - allClustersFromDedicatedID.add(vo.getClusterId()); - } + tempStorage.clear(); + tempStorage = _dedicatedDao.searchDedicatedClusters(null, vmDomainId, null, null, + new Filter(DedicatedResourceVO.class, "id", true, 0L, 1L)).first(); - tempStorage.clear(); - tempStorage = _dedicatedDao.searchDedicatedHosts(null, vmDomainId, null, null, new Filter(DedicatedResourceVO.class, "id", true, 0L, 1L)).first(); + for (DedicatedResourceVO vo : tempStorage) { + allClustersFromDedicatedID.add(vo.getClusterId()); + } - for(DedicatedResourceVO vo : tempStorage) { - allHostsFromDedicatedID.add(vo.getHostId()); - } + tempStorage.clear(); + tempStorage = _dedicatedDao.searchDedicatedHosts(null, vmDomainId, null, null, + new Filter(DedicatedResourceVO.class, "id", true, 0L, 1L)).first(); - //Remove the dedicated ones from main list - allPodsInDc.removeAll(allPodsFromDedicatedID); - allClustersInDc.removeAll(allClustersFromDedicatedID); - allHostsInDc.removeAll(allHostsFromDedicatedID); + for (DedicatedResourceVO vo : tempStorage) { + allHostsFromDedicatedID.add(vo.getHostId()); } - //Add in avoid list or no addition if no dedication - if (logger.isDebugEnabled()) { - logger.debug("Adding pods to avoid lists: " + allPodsInDc); - } - avoids.addPodList(allPodsInDc); - if (logger.isDebugEnabled()) { - logger.debug("Adding clusters to avoid lists: " + allClustersInDc); - } - avoids.addClusterList(allClustersInDc); - if (logger.isDebugEnabled()) { - logger.debug("Adding hosts to avoid lists: " + allHostsInDc); - } - avoids.addHostList(allHostsInDc); + allPodsInDc.removeAll(allPodsFromDedicatedID); + allClustersInDc.removeAll(allClustersFromDedicatedID); + allHostsInDc.removeAll(allHostsFromDedicatedID); } + + logger.debug(() -> LogUtils.logGsonWithoutException("Adding pods [%s], clusters [%s] and hosts [%s] to the avoid list in the deploy process of VR VM [%s], " + "because this VM is not dedicated to these components.", allPodsInDc, allClustersInDc, allHostsInDc, vm.getUuid())); + avoids.addPodList(allPodsInDc); + avoids.addClusterList(allClustersInDc); + avoids.addHostList(allHostsInDc); + } + + private void findAvoidSetForNonExplicitUserVM(ExcludeList avoids, VirtualMachine vm, List allPodsInDc, List allClustersInDc, List allHostsInDc) { + logger.debug(() -> LogUtils.logGsonWithoutException("Adding pods [%s], clusters [%s] and hosts [%s] to the avoid list in the deploy process of user VM [%s], " + "because this VM is not explicitly dedicated to these components.", allPodsInDc, allClustersInDc, allHostsInDc, vm.getUuid())); + avoids.addPodList(allPodsInDc); + avoids.addClusterList(allClustersInDc); + avoids.addHostList(allHostsInDc); } private 
void resetAvoidSet(ExcludeList avoidSet, ExcludeList removeSet) { @@ -1246,14 +1279,15 @@ private DeployDestination checkClustersforDestination(List clusterList, Vi DeploymentPlanner.PlannerResourceUsage resourceUsageRequired, ExcludeList plannerAvoidOutput) { if (logger.isTraceEnabled()) { - logger.trace("ClusterId List to consider: " + clusterList); + logger.trace("ClusterId List to consider: {}.", clusterList); } for (Long clusterId : clusterList) { ClusterVO clusterVO = _clusterDao.findById(clusterId); if (clusterVO.getHypervisorType() != vmProfile.getHypervisorType()) { - logger.debug("Cluster: " + clusterId + " has HyperVisorType that does not match the VM, skipping this cluster"); + logger.debug("Adding cluster [{}] to the avoid set because the cluster's hypervisor [{}] does not match the VM [{}] hypervisor: [{}]. Skipping this cluster.", + clusterVO.getUuid(), clusterVO.getHypervisorType().name(), vmProfile.getUuid(), vmProfile.getHypervisorType().name()); avoid.addCluster(clusterVO.getId()); continue; } @@ -1564,14 +1598,17 @@ public int compare(Volume v1, Volume v2) { boolean hostHasEncryption = Boolean.parseBoolean(potentialHostVO.getDetail(Host.HOST_VOLUME_ENCRYPTION)); boolean hostMeetsEncryptionRequirements = !anyVolumeRequiresEncryption(new ArrayList<>(volumesOrderBySizeDesc)) || hostHasEncryption; - boolean plannerUsageFits = checkIfHostFitsPlannerUsage(potentialHost.getId(), resourceUsageRequired); + boolean hostFitsPlannerUsage = checkIfHostFitsPlannerUsage(potentialHost.getId(), resourceUsageRequired); - if (hostCanAccessPool && haveEnoughSpace && hostAffinityCheck && hostMeetsEncryptionRequirements && plannerUsageFits) { + if (hostCanAccessPool && haveEnoughSpace && hostAffinityCheck && hostMeetsEncryptionRequirements && hostFitsPlannerUsage) { logger.debug("Found a potential host " + "id: " + potentialHost.getId() + " name: " + potentialHost.getName() + " and associated storage pools for this VM"); volumeAllocationMap.clear(); return new Pair>(potentialHost, storage); } else { + logger.debug("Adding host [{}] to the avoid set. Conditions: can access pool [{}], has enough space [{}], passes affinity check [{}], fits planner [{}] usage [{}].", + potentialHost.getUuid(), hostCanAccessPool, haveEnoughSpace, hostAffinityCheck, resourceUsageRequired.getClass().getSimpleName(), hostFitsPlannerUsage); + if (!hostMeetsEncryptionRequirements) { logger.debug("Potential host " + potentialHost + " did not meet encryption requirements of all volumes"); } @@ -1668,12 +1705,12 @@ protected Pair>, List> findSuitablePoolsFo // There should be at least the ROOT volume of the VM in usable state if (volumesTobeCreated.isEmpty()) { // OfflineVmwareMigration: find out what is wrong with the id of the vm we try to start - throw new CloudRuntimeException("Unable to create deployment, no usable volumes found for the VM: " + vmProfile.getId()); + throw new CloudRuntimeException("Unable to create deployment, no usable volumes found for the VM: " + vmProfile.getUuid()); } // don't allow to start vm that doesn't have a root volume if (_volsDao.findByInstanceAndType(vmProfile.getId(), Volume.Type.ROOT).isEmpty()) { - throw new CloudRuntimeException("Unable to prepare volumes for vm as ROOT volume is missing"); + throw new CloudRuntimeException(String.format("Unable to deploy VM [%s] because the ROOT volume is missing.", vmProfile.getUuid())); } // for each volume find list of suitable storage pools by calling the @@ -1685,7 +1722,7 @@ protected Pair>, List> findSuitablePoolsFo Set poolsToAvoidOutput = new 
HashSet<>(originalAvoidPoolSet); for (VolumeVO toBeCreated : volumesTobeCreated) { - logger.debug("Checking suitable pools for volume (Id, Type): (" + toBeCreated.getId() + "," + toBeCreated.getVolumeType().name() + ")"); + logger.debug("Checking suitable pools for volume [{}, {}] of VM [{}].", toBeCreated.getUuid(), toBeCreated.getVolumeType().name(), vmProfile.getUuid()); if (toBeCreated.getState() == Volume.State.Allocated && toBeCreated.getPoolId() != null) { toBeCreated.setPoolId(null); @@ -1701,72 +1738,18 @@ protected Pair>, List> findSuitablePoolsFo // volume is ready and the pool should be reused. // In this case, also check if rest of the volumes are ready and can // be reused. - if (plan.getPoolId() != null || (toBeCreated.getVolumeType() == Volume.Type.DATADISK && toBeCreated.getPoolId() != null && toBeCreated.getState() == Volume.State.Ready)) { - logger.debug("Volume has pool already allocated, checking if pool can be reused, poolId: " + toBeCreated.getPoolId()); - List suitablePools = new ArrayList(); - StoragePool pool = null; - if (toBeCreated.getPoolId() != null) { - pool = (StoragePool)dataStoreMgr.getPrimaryDataStore(toBeCreated.getPoolId()); - } else { - pool = (StoragePool)dataStoreMgr.getPrimaryDataStore(plan.getPoolId()); - } - - if (!pool.isInMaintenance()) { - if (!avoid.shouldAvoid(pool)) { - long exstPoolDcId = pool.getDataCenterId(); - long exstPoolPodId = pool.getPodId() != null ? pool.getPodId() : -1; - long exstPoolClusterId = pool.getClusterId() != null ? pool.getClusterId() : -1; - boolean canReusePool = false; - if (plan.getDataCenterId() == exstPoolDcId && plan.getPodId() == exstPoolPodId && plan.getClusterId() == exstPoolClusterId) { - canReusePool = true; - } else if (plan.getDataCenterId() == exstPoolDcId) { - DataStore dataStore = dataStoreMgr.getPrimaryDataStore(pool.getId()); - if (dataStore != null && dataStore.getScope() != null && dataStore.getScope().getScopeType() == ScopeType.ZONE) { - canReusePool = true; - } - } else { - logger.debug("Pool of the volume does not fit the specified plan, need to reallocate a pool for this volume"); - canReusePool = false; - } - - if (canReusePool) { - logger.debug("Planner need not allocate a pool for this volume since its READY"); - suitablePools.add(pool); - suitableVolumeStoragePools.put(toBeCreated, suitablePools); - if (!(toBeCreated.getState() == Volume.State.Allocated || toBeCreated.getState() == Volume.State.Creating)) { - readyAndReusedVolumes.add(toBeCreated); - } - continue; - } - } else { - logger.debug("Pool of the volume is in avoid set, need to reallocate a pool for this volume"); - } - } else { - logger.debug("Pool of the volume is in maintenance, need to reallocate a pool for this volume"); - } + if ((plan.getPoolId() != null || (toBeCreated.getVolumeType() == Volume.Type.DATADISK && toBeCreated.getPoolId() != null && toBeCreated.getState() == Volume.State.Ready)) && + checkIfPoolCanBeReused(vmProfile, plan, avoid, suitableVolumeStoragePools, readyAndReusedVolumes, toBeCreated)) { + continue; } - if (logger.isDebugEnabled()) { - logger.debug("We need to allocate new storagepool for this volume"); - } - if (!isRootAdmin(vmProfile)) { - if (!isEnabledForAllocation(plan.getDataCenterId(), plan.getPodId(), plan.getClusterId())) { - if (logger.isDebugEnabled()) { - logger.debug("Cannot allocate new storagepool for this volume in this cluster, allocation state is disabled"); - logger.debug("Cannot deploy to this specified plan, allocation state is disabled, returning."); - } - // Cannot find 
suitable storage pools under this cluster for - // this volume since allocation_state is disabled. - // - remove any suitable pools found for other volumes. - // All volumes should get suitable pools under this cluster; - // else we can't use this cluster. - suitableVolumeStoragePools.clear(); - break; - } + if (!isRootAdmin(vmProfile) && !isEnabledForAllocation(plan.getDataCenterId(), plan.getPodId(), plan.getClusterId())) { + logger.debug("Cannot find a new storage pool to deploy volume [{}] of VM [{}] in cluster [{}] because the allocation state is disabled. Returning.", + toBeCreated.getUuid(), vmProfile.getUuid(), plan.getClusterId()); + suitableVolumeStoragePools.clear(); + break; + } - logger.debug("Calling StoragePoolAllocators to find suitable pools"); - DiskOfferingVO diskOffering = _diskOfferingDao.findById(toBeCreated.getDiskOfferingId()); DiskProfile diskProfile = new DiskProfile(toBeCreated, diskOffering, vmProfile.getHypervisorType()); @@ -1783,16 +1766,8 @@ protected Pair>, List> findSuitablePoolsFo useLocalStorage = diskOffering.isUseLocalStorage(); } diskProfile.setUseLocalStorage(useLocalStorage); - - boolean foundPotentialPools = false; - for (StoragePoolAllocator allocator : _storagePoolAllocators) { - final List suitablePools = allocator.allocateToPool(diskProfile, vmProfile, plan, avoid, returnUpTo); - if (suitablePools != null && !suitablePools.isEmpty()) { - checkForPreferredStoragePool(suitablePools, vmProfile.getVirtualMachine(), suitableVolumeStoragePools, toBeCreated); - foundPotentialPools = true; - break; - } - } + logger.debug("Calling StoragePoolAllocators to find suitable pools to allocate volume [{}] necessary to deploy VM [{}].", toBeCreated.getUuid(), vmProfile.getUuid()); + boolean foundPotentialPools = tryToFindPotentialPoolsToAllocateVolume(vmProfile, plan, avoid, returnUpTo, suitableVolumeStoragePools, toBeCreated, diskProfile); if (avoid.getPoolsToAvoid() != null) { poolsToAvoidOutput.addAll(avoid.getPoolsToAvoid()); @@ -1800,7 +1775,7 @@ protected Pair>, List> findSuitablePoolsFo } if (!foundPotentialPools) { - logger.debug("No suitable pools found for volume: " + toBeCreated + " under cluster: " + plan.getClusterId()); + logger.debug("No suitable pools found for volume [{}] used by VM [{}] under cluster [{}].", toBeCreated.getUuid(), vmProfile.getUuid(), plan.getClusterId()); // No suitable storage pools found under this cluster for this // volume. - remove any suitable pools found for other volumes. 
// All volumes should get suitable pools under this cluster; @@ -1829,6 +1804,75 @@ protected Pair>, List> findSuitablePoolsFo return new Pair>, List>(suitableVolumeStoragePools, readyAndReusedVolumes); } + private boolean tryToFindPotentialPoolsToAllocateVolume(VirtualMachineProfile vmProfile, DeploymentPlan plan, ExcludeList avoid, int returnUpTo, + Map> suitableVolumeStoragePools, VolumeVO toBeCreated, DiskProfile diskProfile) { + for (StoragePoolAllocator allocator : _storagePoolAllocators) { + logger.debug("Trying to find suitable pools to allocate volume [{}] necessary to deploy VM [{}], using StoragePoolAllocator: [{}].", + toBeCreated.getUuid(), vmProfile.getUuid(), allocator.getClass().getSimpleName()); + + final List suitablePools = allocator.allocateToPool(diskProfile, vmProfile, plan, avoid, returnUpTo); + if (suitablePools != null && !suitablePools.isEmpty()) { + logger.debug("StoragePoolAllocator [{}] found {} suitable pools to allocate volume [{}] necessary to deploy VM [{}].", + allocator.getClass().getSimpleName(), suitablePools.size(), toBeCreated.getUuid(), vmProfile.getUuid()); + checkForPreferredStoragePool(suitablePools, vmProfile.getVirtualMachine(), suitableVolumeStoragePools, toBeCreated); + return true; + } + } + return false; + } + + private boolean checkIfPoolCanBeReused(VirtualMachineProfile vmProfile, DeploymentPlan plan, ExcludeList avoid, + Map> suitableVolumeStoragePools, List readyAndReusedVolumes, + VolumeVO toBeCreated) { + logger.debug("Volume [{}] of VM [{}] has pool [{}] already specified. Checking if this pool can be reused.", toBeCreated.getUuid(), vmProfile.getUuid(), toBeCreated.getPoolId()); + List suitablePools = new ArrayList(); + StoragePool pool = null; + if (toBeCreated.getPoolId() != null) { + pool = (StoragePool)dataStoreMgr.getPrimaryDataStore(toBeCreated.getPoolId()); + } else { + pool = (StoragePool)dataStoreMgr.getPrimaryDataStore(plan.getPoolId()); + } + + if (!pool.isInMaintenance()) { + if (!avoid.shouldAvoid(pool)) { + return canReusePool(vmProfile, plan, suitableVolumeStoragePools, readyAndReusedVolumes, toBeCreated, suitablePools, pool); + } else { + logger.debug("Pool [{}] of volume [{}] used by VM [{}] is in the avoid set. Need to reallocate a pool for this volume.", + pool.getUuid(), toBeCreated.getUuid(), vmProfile.getUuid()); + } + } else { + logger.debug("Pool [{}] of volume [{}] used by VM [{}] is in maintenance. Need to reallocate a pool for this volume.", + pool.getUuid(), toBeCreated.getUuid(), vmProfile.getUuid()); + } + return false; + } + + private boolean canReusePool(VirtualMachineProfile vmProfile, DeploymentPlan plan, + Map> suitableVolumeStoragePools, List readyAndReusedVolumes, + VolumeVO toBeCreated, List suitablePools, StoragePool pool) { + DataStore dataStore = dataStoreMgr.getPrimaryDataStore(pool.getId()); + + long exstPoolDcId = pool.getDataCenterId(); + long exstPoolPodId = pool.getPodId() != null ? pool.getPodId() : -1; + long exstPoolClusterId = pool.getClusterId() != null ? pool.getClusterId() : -1; + + if (plan.getDataCenterId() == exstPoolDcId && ((plan.getPodId() == exstPoolPodId && plan.getClusterId() == exstPoolClusterId) || + (dataStore != null && dataStore.getScope() != null && dataStore.getScope().getScopeType() == ScopeType.ZONE))) { + logger.debug("Pool [{}] of volume [{}] used by VM [{}] fits the specified plan. 
No need to reallocate a pool for this volume.", + pool.getUuid(), toBeCreated.getUuid(), vmProfile.getUuid()); + suitablePools.add(pool); + suitableVolumeStoragePools.put(toBeCreated, suitablePools); + if (!(toBeCreated.getState() == Volume.State.Allocated || toBeCreated.getState() == Volume.State.Creating)) { + readyAndReusedVolumes.add(toBeCreated); + } + return true; + } + + logger.debug("Pool [{}] of volume [{}] used by VM [{}] does not fit the specified plan. Need to reallocate a pool for this volume.", + pool.getUuid(), toBeCreated.getUuid(), vmProfile.getUuid()); + return false; + } + private void checkForPreferredStoragePool(List suitablePools, VirtualMachine vm, Map> suitableVolumeStoragePools, @@ -1972,6 +2016,9 @@ public boolean postStateTransitionEvent(StateMachine2.Transition t return true; } + public static String logDeploymentWithoutException(VirtualMachine vm, DeploymentPlan plan, ExcludeList avoids, DeploymentPlanner planner) { + return LogUtils.logGsonWithoutException("Trying to deploy VM [%s] and details: Plan [%s]; avoid list [%s] and planner: [%s].", vm, plan, avoids, planner); + } @Override public ConfigKey[] getConfigKeys() { return new ConfigKey[] {allowRouterOnDisabledResource, allowAdminVmOnDisabledResource}; diff --git a/server/src/main/java/com/cloud/network/NetworkServiceImpl.java b/server/src/main/java/com/cloud/network/NetworkServiceImpl.java index 9705f231e8b8..2b7e0450935f 100644 --- a/server/src/main/java/com/cloud/network/NetworkServiceImpl.java +++ b/server/src/main/java/com/cloud/network/NetworkServiceImpl.java @@ -3248,7 +3248,7 @@ public Network updateGuestNetwork(final UpdateNetworkCmd cmd) { if (NetUtils.isNetworkAWithinNetworkB(network.getCidr(), network.getNetworkCidr())) { logger.warn( "Existing IP reservation will become ineffective for the network with id = " + networkId + " You need to reapply reservation after network reimplementation."); - //set cidr to the newtork cidr + //set cidr to the network cidr network.setCidr(network.getNetworkCidr()); //set networkCidr to null to bring network back to no IP reservation state network.setNetworkCidr(null); diff --git a/server/src/main/java/com/cloud/network/router/VirtualNetworkApplianceManagerImpl.java b/server/src/main/java/com/cloud/network/router/VirtualNetworkApplianceManagerImpl.java index 5cfc1b6d0767..e162d8ff6d5b 100644 --- a/server/src/main/java/com/cloud/network/router/VirtualNetworkApplianceManagerImpl.java +++ b/server/src/main/java/com/cloud/network/router/VirtualNetworkApplianceManagerImpl.java @@ -1585,7 +1585,7 @@ protected void runInContext() { } private SetMonitorServiceCommand createMonitorServiceCommand(DomainRouterVO router, List services, - boolean reconfigure, boolean deleteFromProcessedCache) { + boolean reconfigure, boolean deleteFromProcessedCache, Map routerHealthCheckConfig) { final SetMonitorServiceCommand command = new SetMonitorServiceCommand(services); command.setAccessDetail(NetworkElementCommand.ROUTER_IP, _routerControlHelper.getRouterControlIp(router.getId())); command.setAccessDetail(NetworkElementCommand.ROUTER_NAME, router.getInstanceName()); @@ -1603,7 +1603,7 @@ private SetMonitorServiceCommand createMonitorServiceCommand(DomainRouterVO rout } command.setAccessDetail(SetMonitorServiceCommand.ROUTER_HEALTH_CHECKS_EXCLUDED, excludedTests); - command.setHealthChecksConfig(getRouterHealthChecksConfig(router)); + command.setHealthChecksConfig(routerHealthCheckConfig); command.setReconfigureAfterUpdate(reconfigure); 
command.setDeleteFromProcessedCache(deleteFromProcessedCache); // As part of updating return command; @@ -1628,7 +1628,7 @@ private boolean updateRouterHealthChecksConfig(DomainRouterVO router) { logger.info("Updating data for router health checks for router " + router.getUuid()); Answer origAnswer = null; try { - SetMonitorServiceCommand command = createMonitorServiceCommand(router, null, true, true); + SetMonitorServiceCommand command = createMonitorServiceCommand(router, null, true, true, getRouterHealthChecksConfig(router)); origAnswer = _agentMgr.easySend(router.getHostId(), command); } catch (final Exception e) { logger.error("Error while sending update data for health check to router: " + router.getInstanceName(), e); @@ -1743,7 +1743,7 @@ private void updateWithLbRules(final DomainRouterJoinVO routerJoinVO, final Stri } } - private Map<String, String> getRouterHealthChecksConfig(final DomainRouterVO router) { + protected Map<String, String> getRouterHealthChecksConfig(final DomainRouterVO router) { Map<String, String> data = new HashMap<>(); List<DomainRouterJoinVO> routerJoinVOs = domainRouterJoinDao.searchByIds(router.getId()); StringBuilder vmsData = new StringBuilder(); @@ -1757,16 +1757,14 @@ private Map<String, String> getRouterHealthChecksConfig(final DomainRouterVO rou } SearchBuilder<UserVmJoinVO> sbvm = userVmJoinDao.createSearchBuilder(); sbvm.and("networkId", sbvm.entity().getNetworkId(), SearchCriteria.Op.EQ); + sbvm.and("state", sbvm.entity().getState(), SearchCriteria.Op.EQ); SearchCriteria<UserVmJoinVO> scvm = sbvm.create(); scvm.setParameters("networkId", routerJoinVO.getNetworkId()); + scvm.setParameters("state", VirtualMachine.State.Running); List<UserVmJoinVO> vms = userVmJoinDao.search(scvm, null); boolean isDhcpSupported = _ntwkSrvcDao.areServicesSupportedInNetwork(routerJoinVO.getNetworkId(), Service.Dhcp); boolean isDnsSupported = _ntwkSrvcDao.areServicesSupportedInNetwork(routerJoinVO.getNetworkId(), Service.Dns); for (UserVmJoinVO vm : vms) { - if (vm.getState() != VirtualMachine.State.Running) { - continue; - } - vmsData.append("vmName=").append(vm.getName()) .append(",macAddress=").append(vm.getMacAddress()) .append(",ip=").append(vm.getIpAddress()) @@ -2308,6 +2306,7 @@ public boolean finalizeCommandsOnStart(final Commands cmds, final VirtualMachine final Provider provider = getVrProvider(router); final List<Long> routerGuestNtwkIds = _routerDao.getRouterNetworks(router.getId()); + Map<String, String> routerHealthChecksConfig = getRouterHealthChecksConfig(router); for (final Long guestNetworkId : routerGuestNtwkIds) { final AggregationControlCommand startCmd = new AggregationControlCommand(Action.Start, router.getInstanceName(), controlNic.getIPv4Address(), _routerControlHelper.getRouterIpInNetwork( guestNetworkId, router.getId())); @@ -2316,7 +2315,7 @@ public boolean finalizeCommandsOnStart(final Commands cmds, final VirtualMachine if (reprogramGuestNtwks) { finalizeIpAssocForNetwork(cmds, router, provider, guestNetworkId, null); finalizeNetworkRulesForNetwork(cmds, router, provider, guestNetworkId); - finalizeMonitorService(cmds, profile, router, provider, guestNetworkId, true); + finalizeMonitorService(cmds, profile, router, provider, guestNetworkId, true, routerHealthChecksConfig); } finalizeUserDataAndDhcpOnStart(cmds, router, provider, guestNetworkId); @@ -2330,7 +2329,7 @@ public boolean finalizeCommandsOnStart(final Commands cmds, final VirtualMachine } protected void finalizeMonitorService(final Commands cmds, final VirtualMachineProfile profile, final DomainRouterVO router, final Provider provider, - final long networkId, boolean onStart) { + final long networkId, boolean onStart, Map<String, String> routerHealthCheckConfig) { final NetworkOffering offering = _networkOfferingDao.findById(_networkDao.findById(networkId).getNetworkOfferingId()); if (offering.isRedundantRouter()) { // service monitoring is currently not added in RVR @@ -2380,7 +2379,7 @@ protected void finalizeMonitorService(final Commands cmds, final VirtualMachineP } // As part of aggregate command we don't need to reconfigure if onStart and persist in processed cache. Subsequent updates are not needed. - SetMonitorServiceCommand command = createMonitorServiceCommand(router, servicesTO, !onStart, false); + SetMonitorServiceCommand command = createMonitorServiceCommand(router, servicesTO, !onStart, false, routerHealthCheckConfig); command.setAccessDetail(NetworkElementCommand.ROUTER_GUEST_IP, _routerControlHelper.getRouterIpInNetwork(networkId, router.getId())); if (!isMonitoringServicesEnabled) { command.setAccessDetail(SetMonitorServiceCommand.ROUTER_MONITORING_ENABLED, isMonitoringServicesEnabled.toString()); diff --git a/server/src/main/java/com/cloud/network/router/VpcVirtualNetworkApplianceManagerImpl.java b/server/src/main/java/com/cloud/network/router/VpcVirtualNetworkApplianceManagerImpl.java index c09df8d86557..69f7555696b1 100644 --- a/server/src/main/java/com/cloud/network/router/VpcVirtualNetworkApplianceManagerImpl.java +++ b/server/src/main/java/com/cloud/network/router/VpcVirtualNetworkApplianceManagerImpl.java @@ -495,8 +495,9 @@ public boolean finalizeCommandsOnStart(final Commands cmds, final VirtualMachine throw new CloudRuntimeException("Cannot find related provider of virtual router provider: " + vrProvider.getType().toString()); } + Map<String, String> routerHealthCheckConfig = getRouterHealthChecksConfig(domainRouterVO); if (reprogramGuestNtwks && publicNics.size() > 0) { - finalizeMonitorService(cmds, profile, domainRouterVO, provider, publicNics.get(0).second().getId(), true); + finalizeMonitorService(cmds, profile, domainRouterVO, provider, publicNics.get(0).second().getId(), true, routerHealthCheckConfig); } for (final Pair<Nic, Network> nicNtwk : guestNics) { @@ -508,7 +509,7 @@ public boolean finalizeCommandsOnStart(final Commands cmds, final VirtualMachine if (reprogramGuestNtwks) { finalizeIpAssocForNetwork(cmds, domainRouterVO, provider, guestNetworkId, vlanMacAddress); finalizeNetworkRulesForNetwork(cmds, domainRouterVO, provider, guestNetworkId); - finalizeMonitorService(cmds, profile, domainRouterVO, provider, guestNetworkId, true); + finalizeMonitorService(cmds, profile, domainRouterVO, provider, guestNetworkId, true, routerHealthCheckConfig); } finalizeUserDataAndDhcpOnStart(cmds, domainRouterVO, provider, guestNetworkId); @@ -567,7 +568,7 @@ protected boolean sendNetworkRulesToRouter(final long routerId, final long netwo finalizeNetworkRulesForNetwork(cmds, router, provider, networkId); } - finalizeMonitorService(cmds, getVirtualMachineProfile(router), router, provider, networkId, false); + finalizeMonitorService(cmds, getVirtualMachineProfile(router), router, provider, networkId, false, getRouterHealthChecksConfig(router)); return _nwHelper.sendCommandsToRouter(router, cmds); } diff --git a/server/src/main/java/com/cloud/server/ConfigurationServerImpl.java b/server/src/main/java/com/cloud/server/ConfigurationServerImpl.java index 4cb909648d88..0ccb6c84fb83 100644 --- a/server/src/main/java/com/cloud/server/ConfigurationServerImpl.java +++ b/server/src/main/java/com/cloud/server/ConfigurationServerImpl.java @@ -617,7 +617,7 @@ public void updateKeyPairs() { // FIXME: take a global database lock here for safety.
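        // the first ssh-keygen invocation requests PEM output (-m PEM) and discards its errors
        // via 2>/dev/null; the second invocation is a fallback for builds without -m support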
boolean onWindows = isOnWindows(); if(!onWindows) { - Script.runSimpleBashScript("if [ -f " + privkeyfile + " ]; then rm -f " + privkeyfile + "; fi; ssh-keygen -t ed25519 -m PEM -N '' -f " + privkeyfile + " -q 2>/dev/null || ssh-keygen -t ed25519 -N '' -f " + privkeyfile + " -q"); + Script.runSimpleBashScript("if [ -f " + privkeyfile + " ]; then rm -f " + privkeyfile + "; fi; ssh-keygen -t ecdsa -m PEM -N '' -f " + privkeyfile + " -q 2>/dev/null || ssh-keygen -t ecdsa -N '' -f " + privkeyfile + " -q"); } final String privateKey; diff --git a/server/src/main/java/com/cloud/server/ManagementServerImpl.java b/server/src/main/java/com/cloud/server/ManagementServerImpl.java index c23d23f7231e..0406ba04f8ce 100644 --- a/server/src/main/java/com/cloud/server/ManagementServerImpl.java +++ b/server/src/main/java/com/cloud/server/ManagementServerImpl.java @@ -551,6 +551,7 @@ import org.apache.cloudstack.api.command.user.volume.AssignVolumeCmd; import org.apache.cloudstack.api.command.user.volume.AttachVolumeCmd; import org.apache.cloudstack.api.command.user.volume.ChangeOfferingForVolumeCmd; +import org.apache.cloudstack.api.command.user.volume.CheckAndRepairVolumeCmd; import org.apache.cloudstack.api.command.user.volume.CreateVolumeCmd; import org.apache.cloudstack.api.command.user.volume.DeleteVolumeCmd; import org.apache.cloudstack.api.command.user.volume.DestroyVolumeCmd; @@ -3790,6 +3791,7 @@ public List> getCommands() { cmdList.add(ListVMGroupsCmd.class); cmdList.add(UpdateVMGroupCmd.class); cmdList.add(AttachVolumeCmd.class); + cmdList.add(CheckAndRepairVolumeCmd.class); cmdList.add(CreateVolumeCmd.class); cmdList.add(DeleteVolumeCmd.class); cmdList.add(UpdateVolumeCmd.class); diff --git a/server/src/main/java/com/cloud/server/StatsCollector.java b/server/src/main/java/com/cloud/server/StatsCollector.java index 6623d8dcde8f..f9ad0f519664 100644 --- a/server/src/main/java/com/cloud/server/StatsCollector.java +++ b/server/src/main/java/com/cloud/server/StatsCollector.java @@ -1710,17 +1710,21 @@ protected void runInContext() { storagePoolStats.put(pool.getId(), (StorageStats)answer); boolean poolNeedsUpdating = false; + long capacityBytes = ((StorageStats)answer).getCapacityBytes(); + long usedBytes = ((StorageStats)answer).getByteUsed(); // Seems like we have dynamically updated the pool size since the prev. 
size and the current do not match - if (_storagePoolStats.get(poolId) != null && _storagePoolStats.get(poolId).getCapacityBytes() != ((StorageStats)answer).getCapacityBytes()) { - if (((StorageStats)answer).getCapacityBytes() > 0) { - pool.setCapacityBytes(((StorageStats)answer).getCapacityBytes()); + if ((_storagePoolStats.get(poolId) != null && _storagePoolStats.get(poolId).getCapacityBytes() != capacityBytes) + || pool.getCapacityBytes() != capacityBytes) { + if (capacityBytes > 0) { + pool.setCapacityBytes(capacityBytes); poolNeedsUpdating = true; } else { logger.warn("Not setting capacity bytes, received " + ((StorageStats)answer).getCapacityBytes() + " capacity for pool ID " + poolId); } } - if (pool.getUsedBytes() != ((StorageStats)answer).getByteUsed() && (pool.getStorageProviderName().equalsIgnoreCase(DataStoreProvider.DEFAULT_PRIMARY) || _storageManager.canPoolProvideStorageStats(pool))) { - pool.setUsedBytes(((StorageStats) answer).getByteUsed()); + if (((_storagePoolStats.get(poolId) != null && _storagePoolStats.get(poolId).getByteUsed() != usedBytes) + || pool.getUsedBytes() != usedBytes) && (pool.getStorageProviderName().equalsIgnoreCase(DataStoreProvider.DEFAULT_PRIMARY) || _storageManager.canPoolProvideStorageStats(pool))) { + pool.setUsedBytes(usedBytes); poolNeedsUpdating = true; } if (poolNeedsUpdating) { diff --git a/server/src/main/java/com/cloud/storage/CheckAndRepairVolumePayload.java b/server/src/main/java/com/cloud/storage/CheckAndRepairVolumePayload.java new file mode 100644 index 000000000000..eabe1a4c7b81 --- /dev/null +++ b/server/src/main/java/com/cloud/storage/CheckAndRepairVolumePayload.java @@ -0,0 +1,41 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
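+// Payload attached to a VolumeInfo to carry the requested repair mode down to the
+// storage subsystem and the check/repair output back up to the API layer.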
+ +package com.cloud.storage; + +public class CheckAndRepairVolumePayload { + + private final String repair; + private String result; + + public CheckAndRepairVolumePayload(String repair) { + this.repair = repair; + } + + public String getRepair() { + return repair; + } + + public String getResult() { + return result; + } + + public void setResult(String result) { + this.result = result; + } + +} diff --git a/server/src/main/java/com/cloud/storage/VolumeApiServiceImpl.java b/server/src/main/java/com/cloud/storage/VolumeApiServiceImpl.java index 887bbc91d021..9ab446e55da7 100644 --- a/server/src/main/java/com/cloud/storage/VolumeApiServiceImpl.java +++ b/server/src/main/java/com/cloud/storage/VolumeApiServiceImpl.java @@ -42,6 +42,7 @@ import org.apache.cloudstack.api.command.user.volume.AssignVolumeCmd; import org.apache.cloudstack.api.command.user.volume.AttachVolumeCmd; import org.apache.cloudstack.api.command.user.volume.ChangeOfferingForVolumeCmd; +import org.apache.cloudstack.api.command.user.volume.CheckAndRepairVolumeCmd; import org.apache.cloudstack.api.command.user.volume.CreateVolumeCmd; import org.apache.cloudstack.api.command.user.volume.DetachVolumeCmd; import org.apache.cloudstack.api.command.user.volume.ExtractVolumeCmd; @@ -216,6 +217,7 @@ import com.cloud.vm.VmDetailConstants; import com.cloud.vm.VmWork; import com.cloud.vm.VmWorkAttachVolume; +import com.cloud.vm.VmWorkCheckAndRepairVolume; import com.cloud.vm.VmWorkConstants; import com.cloud.vm.VmWorkDetachVolume; import com.cloud.vm.VmWorkExtractVolume; @@ -377,6 +379,9 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic public static ConfigKey<Long> storageTagRuleExecutionTimeout = new ConfigKey<>("Advanced", Long.class, "storage.tag.rule.execution.timeout", "2000", "The maximum runtime," + " in milliseconds, to execute a storage tag rule; if it is reached, a timeout will happen.", true); + public static final ConfigKey<Boolean> AllowCheckAndRepairVolume = new ConfigKey<>("Advanced", Boolean.class, "volume.check.and.repair.leaks.before.use", "false", + "To check and repair the volume if it has any leaks before performing volume attach or VM start operations", true, ConfigKey.Scope.StoragePool); + + private final StateMachine2<Volume.State, Volume.Event, Volume> _volStateMachine; private static final Set<Volume.State> STATES_VOLUME_CANNOT_BE_DESTROYED = new HashSet<>(Arrays.asList(Volume.State.Destroy, Volume.State.Expunging, Volume.State.Expunged, Volume.State.Allocated)); @@ -1333,7 +1338,7 @@ public VolumeVO resizeVolume(ResizeVolumeCmd cmd) throws ResourceAllocationExcep outcome.get(); } catch (InterruptedException e) { throw new RuntimeException("Operation was interrupted", e); - } catch (java.util.concurrent.ExecutionException e) { + } catch (ExecutionException e) { throw new RuntimeException("Execution exception", e); } @@ -1816,7 +1821,158 @@ public void publishVolumeCreationUsageEvent(Volume volume) { logger.debug(String.format("Volume [%s] has been successfully recovered, thus a new usage event %s has been published.", volume.getUuid(), EventTypes.EVENT_VOLUME_CREATE)); } + @Override + @ActionEvent(eventType = EventTypes.EVENT_VOLUME_CHECK, eventDescription = "checking volume and repairing if needed", async = true) + public Pair<String, String> checkAndRepairVolume(CheckAndRepairVolumeCmd cmd) throws ResourceAllocationException { + long volumeId = cmd.getId(); + String repair = cmd.getRepair(); + final VolumeVO volume = _volsDao.findById(volumeId); + validationsForCheckVolumeOperation(volume); + + Long vmId = volume.getInstanceId(); + if (vmId != null) { + // serialize VM operation
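+            // (the check must run through the per-VM work job queue so it cannot race a
+            //  concurrent start/attach on the same VM)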
+ return handleCheckAndRepairVolumeJob(vmId, volumeId, repair); + } else { + return handleCheckAndRepairVolume(volumeId, repair); + } + } + + private Pair<String, String> handleCheckAndRepairVolume(Long volumeId, String repair) { + CheckAndRepairVolumePayload payload = new CheckAndRepairVolumePayload(repair); + VolumeInfo volumeInfo = volFactory.getVolume(volumeId); + volumeInfo.addPayload(payload); + + return volService.checkAndRepairVolume(volumeInfo); + } + + private Pair<String, String> handleCheckAndRepairVolumeJob(Long vmId, Long volumeId, String repair) throws ResourceAllocationException { + AsyncJobExecutionContext jobContext = AsyncJobExecutionContext.getCurrentExecutionContext(); + if (jobContext.isJobDispatchedBy(VmWorkConstants.VM_WORK_JOB_DISPATCHER)) { + // avoid re-entrance + VmWorkJobVO placeHolder = createPlaceHolderWork(vmId); + try { + return orchestrateCheckAndRepairVolume(volumeId, repair); + } finally { + _workJobDao.expunge(placeHolder.getId()); + } + } else { + Outcome<Pair> outcome = checkAndRepairVolumeThroughJobQueue(vmId, volumeId, repair); + try { + outcome.get(); + } catch (InterruptedException e) { + throw new RuntimeException("Operation was interrupted", e); + } catch (ExecutionException e) { + throw new RuntimeException("Execution exception", e); + } + + Object jobResult = _jobMgr.unmarshallResultObject(outcome.getJob()); + if (jobResult != null) { + if (jobResult instanceof ConcurrentOperationException) { + throw (ConcurrentOperationException)jobResult; + } else if (jobResult instanceof ResourceAllocationException) { + throw (ResourceAllocationException)jobResult; + } else if (jobResult instanceof Throwable) { + throw new RuntimeException("Unexpected exception", (Throwable)jobResult); + } + } + + // retrieve the check/repair result pair from the job result + if (jobResult instanceof Pair) { + return (Pair<String, String>) jobResult; + } + + return null; + } + } + + protected void validationsForCheckVolumeOperation(VolumeVO volume) { + Account caller = CallContext.current().getCallingAccount(); + _accountMgr.checkAccess(caller, null, true, volume); + + String volumeName = volume.getName(); + Long vmId = volume.getInstanceId(); + if (vmId != null) { + validateVMforCheckVolumeOperation(vmId, volumeName); + } + + if (volume.getState() != Volume.State.Ready) { + throw new InvalidParameterValueException(String.format("Volume %s is not in Ready state", volumeName)); + } + + HypervisorType hypervisorType = _volsDao.getHypervisorType(volume.getId()); + if (!HypervisorType.KVM.equals(hypervisorType)) { + throw new InvalidParameterValueException("Check and repair volume is supported only for the KVM hypervisor"); + } + + if (!Arrays.asList(ImageFormat.QCOW2, ImageFormat.VDI).contains(volume.getFormat())) { + throw new InvalidParameterValueException("Volume format is not supported for check and repair; only QCOW2 and VDI are supported"); + } + } + + private void validateVMforCheckVolumeOperation(Long vmId, String volumeName) { + Account caller = CallContext.current().getCallingAccount(); + UserVmVO vm = _userVmDao.findById(vmId); + if (vm == null) { + throw new InvalidParameterValueException(String.format("VM not found, please check the VM to which this volume %s is attached", volumeName)); + } + + _accountMgr.checkAccess(caller, null, true, vm); + + if (vm.getState() != State.Stopped) { + throw new InvalidParameterValueException(String.format("VM to which the volume %s is attached must be in Stopped state", volumeName)); + } + } +
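A caller-side sketch (not part of this patch) of how the Pair<String, String> these methods return can be consumed. It assumes, as the unit tests further below suggest, that the first element carries the qemu-img style check JSON and the second the repair output, with a null second element meaning no repair was requested; the class and method names here are placeholders:

import com.cloud.utils.Pair;
import com.google.gson.JsonObject;
import com.google.gson.JsonParser;

public class CheckResultInspector {
    // Extracts the number of leaked clusters from the check JSON; returns 0 when
    // the "leaks" field is absent (i.e. the image had nothing to report).
    public static long leakedClusters(Pair<String, String> checkAndRepairResult) {
        JsonObject check = new JsonParser().parse(checkAndRepairResult.first()).getAsJsonObject();
        return check.has("leaks") ? check.get("leaks").getAsLong() : 0L;
    }
}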
+ private Pair<String, String> orchestrateCheckAndRepairVolume(Long volumeId, String repair) { + + VolumeInfo volume = volFactory.getVolume(volumeId); + + if (volume == null) { + throw new InvalidParameterValueException("Failed to check and repair the volume with ID " + volumeId + " because it does not exist"); + } + + CheckAndRepairVolumePayload payload = new CheckAndRepairVolumePayload(repair); + volume.addPayload(payload); + + return volService.checkAndRepairVolume(volume); + } + + public Outcome<Pair> checkAndRepairVolumeThroughJobQueue(final Long vmId, final Long volumeId, String repair) { + + final CallContext context = CallContext.current(); + final User callingUser = context.getCallingUser(); + final Account callingAccount = context.getCallingAccount(); + + final VMInstanceVO vm = _vmInstanceDao.findById(vmId); + + VmWorkJobVO workJob = new VmWorkJobVO(context.getContextId()); + + workJob.setDispatcher(VmWorkConstants.VM_WORK_JOB_DISPATCHER); + workJob.setCmd(VmWorkCheckAndRepairVolume.class.getName()); + + workJob.setAccountId(callingAccount.getId()); + workJob.setUserId(callingUser.getId()); + workJob.setStep(VmWorkJobVO.Step.Starting); + workJob.setVmType(VirtualMachine.Type.Instance); + workJob.setVmInstanceId(vm.getId()); + workJob.setRelated(AsyncJobExecutionContext.getOriginJobId()); + + // save work context info (there are some duplications) + VmWorkCheckAndRepairVolume workInfo = new VmWorkCheckAndRepairVolume(callingUser.getId(), callingAccount.getId(), vm.getId(), + VolumeApiServiceImpl.VM_WORK_JOB_HANDLER, volumeId, repair); + workJob.setCmdInfo(VmWorkSerializer.serialize(workInfo)); + + _jobMgr.submitAsyncJob(workJob, VmWorkConstants.VM_WORK_QUEUE, vm.getId()); + + AsyncJobExecutionContext.getCurrentExecutionContext().joinJob(workJob.getId()); + + return new VmJobCheckAndRepairVolumeOutcome(workJob); + }
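VmWorkCheckAndRepairVolume is added elsewhere in this PR and does not appear in this excerpt; inferred from the constructor call above, its shape is roughly the following sketch (the VmWork base constructor taking userId, accountId, vmId and the handler name is an assumption based on the sibling VmWork subclasses, and the serialVersionUID is a placeholder):

package com.cloud.vm;

public class VmWorkCheckAndRepairVolume extends VmWork {
    private static final long serialVersionUID = 1L; // placeholder value

    private final Long volumeId;
    private final String repair; // null means check only, no repair

    public VmWorkCheckAndRepairVolume(long userId, long accountId, long vmId, String handlerName,
            Long volumeId, String repair) {
        super(userId, accountId, vmId, handlerName);
        this.volumeId = volumeId;
        this.repair = repair;
    }

    public Long getVolumeId() {
        return volumeId;
    }

    public String getRepair() {
        return repair;
    }
}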
@Override @ActionEvent(eventType = EventTypes.EVENT_VOLUME_CHANGE_DISK_OFFERING, eventDescription = "Changing disk offering of a volume") @@ -1986,7 +2142,7 @@ private VolumeVO resizeVolumeInternal(VolumeVO volume, DiskOfferingVO newDiskOff outcome.get(); } catch (InterruptedException e) { throw new RuntimeException("Operation was interrupted", e); - } catch (java.util.concurrent.ExecutionException e) { + } catch (ExecutionException e) { throw new RuntimeException("Execution exception", e); } @@ -2773,7 +2929,7 @@ public Volume detachVolumeFromVM(DetachVolumeCmd cmmd) { outcome.get(); } catch (InterruptedException e) { throw new RuntimeException("Operation is interrupted", e); - } catch (java.util.concurrent.ExecutionException e) { + } catch (ExecutionException e) { throw new RuntimeException("Execution exception", e); } @@ -3181,7 +3337,7 @@ public Volume migrateVolume(MigrateVolumeCmd cmd) { outcome.get(); } catch (InterruptedException e) { throw new RuntimeException("Operation is interrupted", e); - } catch (java.util.concurrent.ExecutionException e) { + } catch (ExecutionException e) { throw new RuntimeException("Execution exception", e); } @@ -3510,7 +3666,7 @@ private Snapshot takeSnapshotInternal(Long volumeId, Long policyId, Long snapsho outcome.get(); } catch (InterruptedException e) { throw new RuntimeException("Operation is interrupted", e); - } catch (java.util.concurrent.ExecutionException e) { + } catch (ExecutionException e) { throw new RuntimeException("Execution exception", e); } @@ -3827,7 +3983,7 @@ public String extractVolume(ExtractVolumeCmd cmd) { outcome.get(); } catch (InterruptedException e) { throw new RuntimeException("Operation is interrupted", e); - } catch (java.util.concurrent.ExecutionException e) { + } catch (ExecutionException e) { throw new RuntimeException("Execution exception", e); } @@ -4249,6 +4405,12 @@ private VolumeVO sendAttachVolumeCommand(UserVmVO vm, VolumeVO volumeToAttach, L try { // if we don't have a host, the VM we are attaching the disk to has never been started before if (host != null) { + try { + volService.checkAndRepairVolumeBasedOnConfig(volFactory.getVolume(volumeToAttach.getId()), host); + } catch (Exception e) { + logger.debug(String.format("Unable to check and repair volume [%s] on host [%s] due to [%s].", volumeToAttach.getName(), host, e.getMessage())); + } + try { volService.grantAccess(volFactory.getVolume(volumeToAttach.getId()), host, dataStore); } catch (Exception e) { @@ -4594,6 +4756,24 @@ protected Snapshot retrieve() { } } + public class VmJobCheckAndRepairVolumeOutcome extends OutcomeImpl<Pair> { + + public VmJobCheckAndRepairVolumeOutcome(final AsyncJob job) { + super(Pair.class, job, VmJobCheckInterval.value(), new Predicate() { + @Override + public boolean checkCondition() { + AsyncJobVO jobVo = _entityMgr.findById(AsyncJobVO.class, job.getId()); + assert (jobVo != null); + if (jobVo == null || jobVo.getStatus() != JobInfo.Status.IN_PROGRESS) { + return true; + } + + return false; + } + }, AsyncJob.Topics.JOB_STATE); + } + } + public Outcome<Volume> attachVolumeToVmThroughJobQueue(final Long vmId, final Long volumeId, final Long deviceId) { final CallContext context = CallContext.current(); @@ -4831,6 +5011,13 @@ private Pair<JobInfo.Status, String> orchestrateTakeVolumeSnapshot(VmWorkTakeVol return new Pair<JobInfo.Status, String>(JobInfo.Status.SUCCEEDED, _jobMgr.marshallResultObject(work.getSnapshotId())); } + @ReflectionUse + private Pair<JobInfo.Status, String> orchestrateCheckAndRepairVolume(VmWorkCheckAndRepairVolume work) throws Exception { + Pair<String, String> result = orchestrateCheckAndRepairVolume(work.getVolumeId(), work.getRepair()); + return new Pair<JobInfo.Status, String>(JobInfo.Status.SUCCEEDED, _jobMgr.marshallResultObject(result)); + } + @Override public Pair handleVmWorkJob(VmWork work) throws Exception { return _jobHandlerProxy.handleVmWorkJob(work); @@ -4867,7 +5054,8 @@ public ConfigKey<?>[] getConfigKeys() { AllowUserExpungeRecoverVolume, MatchStoragePoolTagsWithDiskOffering, UseHttpsToUpload, - WaitDetachDevice + WaitDetachDevice, + AllowCheckAndRepairVolume }; } } diff --git a/server/src/main/java/com/cloud/usage/UsageServiceImpl.java b/server/src/main/java/com/cloud/usage/UsageServiceImpl.java index 6d6a05cac19e..3398e3ba571b 100644 --- a/server/src/main/java/com/cloud/usage/UsageServiceImpl.java +++ b/server/src/main/java/com/cloud/usage/UsageServiceImpl.java @@ -27,6 +27,7 @@ import javax.naming.ConfigurationException; import com.cloud.domain.Domain; +import com.cloud.utils.DateUtil; import org.apache.cloudstack.api.command.admin.usage.GenerateUsageRecordsCmd; import org.apache.cloudstack.api.command.admin.usage.ListUsageRecordsCmd; import org.apache.cloudstack.api.command.admin.usage.RemoveRawUsageRecordsCmd; @@ -36,6 +37,7 @@ import org.apache.cloudstack.usage.Usage; import org.apache.cloudstack.usage.UsageService; import org.apache.cloudstack.usage.UsageTypes; +import org.apache.commons.lang3.ObjectUtils; import org.apache.commons.lang3.StringUtils; import org.jetbrains.annotations.NotNull; import org.springframework.stereotype.Component; @@ -97,7 +99,7 @@ public class UsageServiceImpl extends ManagerBase implements UsageService, Manag private ConfigurationDao _configDao; @Inject private ProjectManager _projectMgr; - private 
TimeZone _usageTimezone; + private TimeZone _usageTimezone = TimeZone.getTimeZone("GMT"); @Inject private AccountService _accountService; @Inject @@ -127,10 +129,7 @@ public UsageServiceImpl() { @Override public boolean configure(String name, Map params) throws ConfigurationException { super.configure(name, params); - String timeZoneStr = _configDao.getValue(Config.UsageAggregationTimezone.toString()); - if (timeZoneStr == null) { - timeZoneStr = "GMT"; - } + String timeZoneStr = ObjectUtils.defaultIfNull(_configDao.getValue(Config.UsageAggregationTimezone.toString()), "GMT"); _usageTimezone = TimeZone.getTimeZone(timeZoneStr); return true; } @@ -211,10 +210,10 @@ public Pair, Integer> getUsageRecords(ListUsageRecordsCmd Date adjustedStartDate = computeAdjustedTime(startDate, usageTZ); Date adjustedEndDate = computeAdjustedTime(endDate, usageTZ); - if (logger.isDebugEnabled()) { - logger.debug("getting usage records for account: " + accountId + ", domainId: " + domainId + ", between " + adjustedStartDate + " and " + adjustedEndDate + - ", using pageSize: " + cmd.getPageSizeVal() + " and startIndex: " + cmd.getStartIndex()); - } + logger.debug("Getting usage records for account ID [{}], domain ID [{}] between [{}] and [{}] using page size [{}] and start index [{}].", + accountId, domainId, DateUtil.displayDateInTimezone(_usageTimezone, adjustedStartDate), + DateUtil.displayDateInTimezone(_usageTimezone, adjustedEndDate), cmd.getPageSizeVal(), + cmd.getStartIndex()); Filter usageFilter = new Filter(UsageVO.class, "id", true, cmd.getStartIndex(), cmd.getPageSizeVal()); diff --git a/server/src/main/java/com/cloud/user/AccountManagerImpl.java b/server/src/main/java/com/cloud/user/AccountManagerImpl.java index 1a30f192173a..a95b660975b9 100644 --- a/server/src/main/java/com/cloud/user/AccountManagerImpl.java +++ b/server/src/main/java/com/cloud/user/AccountManagerImpl.java @@ -2064,13 +2064,20 @@ public AccountVO updateAccount(UpdateAccountCmd cmd) { @Override @ActionEvent(eventType = EventTypes.EVENT_USER_DELETE, eventDescription = "deleting User") public boolean deleteUser(DeleteUserCmd deleteUserCmd) { - UserVO user = getValidUserVO(deleteUserCmd.getId()); - + final Long id = deleteUserCmd.getId(); + User caller = CallContext.current().getCallingUser(); + UserVO user = getValidUserVO(id); Account account = _accountDao.findById(user.getAccountId()); + if (caller.getId() == id) { + Domain domain = _domainDao.findById(account.getDomainId()); + throw new InvalidParameterValueException(String.format("The caller is requesting to delete itself. As a security measure, ACS will not allow this operation." 
+ + " To delete user %s (ID: %s, Domain: %s), request to another user with permission to execute the operation.", user.getUsername(), user.getUuid(), domain.getUuid())); + } + // don't allow to delete the user from the account of type Project checkAccountAndAccess(user, account); - return _userDao.remove(deleteUserCmd.getId()); + return _userDao.remove(id); } @Override @@ -2145,7 +2152,7 @@ private void checkIfNotMovingAcrossDomains(long domainId, Account newAccount) { } } - private void checkAccountAndAccess(UserVO user, Account account) { + protected void checkAccountAndAccess(UserVO user, Account account) { // don't allow to delete the user from the account of type Project if (account.getType() == Account.Type.PROJECT) { throw new InvalidParameterValueException("Project users cannot be deleted or moved."); @@ -2155,7 +2162,7 @@ private void checkAccountAndAccess(UserVO user, Account account) { CallContext.current().putContextParameter(User.class, user.getUuid()); } - private UserVO getValidUserVO(long id) { + protected UserVO getValidUserVO(long id) { UserVO user = _userDao.findById(id); if (user == null || user.getRemoved() != null) { diff --git a/server/src/test/java/com/cloud/api/query/QueryManagerImplTest.java b/server/src/test/java/com/cloud/api/query/QueryManagerImplTest.java index 09fcde24d79d..35c54a2f6d87 100644 --- a/server/src/test/java/com/cloud/api/query/QueryManagerImplTest.java +++ b/server/src/test/java/com/cloud/api/query/QueryManagerImplTest.java @@ -348,7 +348,7 @@ public void testSearchForBuckets() { @Test public void testGetHostTagsFromTemplateForServiceOfferingsListingNoTemplateId() { - CollectionUtils.isEmpty(queryManager.getHostTagsFromTemplateForServiceOfferingsListing(Mockito.mock(AccountVO.class), null)); + Assert.assertTrue(CollectionUtils.isEmpty(queryManager.getHostTagsFromTemplateForServiceOfferingsListing(Mockito.mock(AccountVO.class), null))); } @Test(expected = InvalidParameterValueException.class) diff --git a/server/src/test/java/com/cloud/configuration/ConfigurationManagerImplTest.java b/server/src/test/java/com/cloud/configuration/ConfigurationManagerImplTest.java index 38189313a52b..958a39be4103 100644 --- a/server/src/test/java/com/cloud/configuration/ConfigurationManagerImplTest.java +++ b/server/src/test/java/com/cloud/configuration/ConfigurationManagerImplTest.java @@ -21,6 +21,18 @@ import com.cloud.utils.net.NetUtils; import org.apache.cloudstack.framework.config.ConfigDepot; import org.apache.cloudstack.framework.config.ConfigKey; +import com.cloud.dc.dao.DataCenterDao; +import com.cloud.domain.Domain; +import com.cloud.domain.dao.DomainDao; +import com.cloud.offering.DiskOffering; +import com.cloud.storage.DiskOfferingVO; +import com.cloud.user.Account; +import com.cloud.user.User; +import com.cloud.utils.db.EntityManager; +import com.cloud.utils.db.SearchCriteria; +import org.apache.cloudstack.api.command.admin.offering.UpdateDiskOfferingCmd; +import org.apache.cloudstack.resourcedetail.DiskOfferingDetailVO; +import org.apache.cloudstack.resourcedetail.dao.DiskOfferingDetailsDao; import org.junit.Assert; import org.junit.Before; import org.junit.Test; @@ -29,7 +41,10 @@ import org.mockito.MockedStatic; import org.mockito.Mockito; import org.mockito.junit.MockitoJUnitRunner; +import org.mockito.InjectMocks; +import org.mockito.Spy; +import java.util.ArrayList; import java.util.List; @@ -37,7 +52,40 @@ public class ConfigurationManagerImplTest { @Mock ConfigDepot configDepot; + @InjectMocks ConfigurationManagerImpl 
configurationManagerImplSpy = Mockito.spy(new ConfigurationManagerImpl()); + @Mock + SearchCriteria searchCriteriaDiskOfferingDetailMock; + @Mock + DiskOffering diskOfferingMock; + @Mock + Account accountMock; + @Mock + User userMock; + @Mock + Domain domainMock; + @Mock + DataCenterDao zoneDaoMock; + @Mock + DomainDao domainDaoMock; + @Mock + EntityManager entityManagerMock; + @Mock + DiskOfferingDetailsDao diskOfferingDetailsDao; + @Spy + DiskOfferingVO diskOfferingVOSpy; + @Mock + UpdateDiskOfferingCmd updateDiskOfferingCmdMock; + + Long validId = 1L; + Long invalidId = 100L; + List filteredZoneIds = List.of(1L, 2L, 3L); + List existingZoneIds = List.of(1L, 2L, 3L); + List filteredDomainIds = List.of(1L, 2L, 3L); + List existingDomainIds = List.of(1L, 2L, 3L); + List emptyExistingZoneIds = new ArrayList<>(); + List emptyExistingDomainIds = new ArrayList<>(); + List emptyFilteredDomainIds = new ArrayList<>(); @Before public void setUp() throws Exception { @@ -50,6 +98,7 @@ public void validateIfIntValueIsInRangeTestValidValueReturnNull() { Assert.assertNull(testVariable); } + @Test public void validateIfIntValueIsInRangeTestInvalidValueReturnString() { String testVariable = configurationManagerImplSpy.validateIfIntValueIsInRange("String name", "9", "1-5"); @@ -250,4 +299,112 @@ public void testValidateIpAddressRelatedConfigValuesValidIpRange() { Mockito.doReturn(key).when(configurationManagerImplSpy._configDepot).get("config.iprange"); configurationManagerImplSpy.validateIpAddressRelatedConfigValues("config.iprange", "192.168.1.1-192.168.1.100"); } + + @Test + public void validateDomainTestInvalidIdThrowException() { + Mockito.doReturn(null).when(domainDaoMock).findById(invalidId); + Assert.assertThrows(InvalidParameterValueException.class, () -> configurationManagerImplSpy.validateDomain(List.of(invalidId))); + } + + @Test + public void validateZoneTestInvalidIdThrowException() { + Mockito.doReturn(null).when(zoneDaoMock).findById(invalidId); + Assert.assertThrows(InvalidParameterValueException.class, () -> configurationManagerImplSpy.validateZone(List.of(invalidId))); + } + + @Test + public void updateDiskOfferingIfCmdAttributeNotNullTestNotNullValueUpdateOfferingAttribute() { + Mockito.doReturn("DiskOfferingName").when(updateDiskOfferingCmdMock).getDiskOfferingName(); + Mockito.doReturn("DisplayText").when(updateDiskOfferingCmdMock).getDisplayText(); + Mockito.doReturn(1).when(updateDiskOfferingCmdMock).getSortKey(); + Mockito.doReturn(false).when(updateDiskOfferingCmdMock).getDisplayOffering(); + + configurationManagerImplSpy.updateDiskOfferingIfCmdAttributeNotNull(diskOfferingVOSpy, updateDiskOfferingCmdMock); + + Assert.assertEquals(updateDiskOfferingCmdMock.getDiskOfferingName(), diskOfferingVOSpy.getName()); + Assert.assertEquals(updateDiskOfferingCmdMock.getDisplayText(), diskOfferingVOSpy.getDisplayText()); + Assert.assertEquals(updateDiskOfferingCmdMock.getSortKey(), (Integer) diskOfferingVOSpy.getSortKey()); + Assert.assertEquals(updateDiskOfferingCmdMock.getDisplayOffering(), diskOfferingVOSpy.getDisplayOffering()); + } + + @Test + public void updateDiskOfferingIfCmdAttributeNotNullTestNullValueDoesntUpdateOfferingAttribute() { + Mockito.doReturn("Name").when(diskOfferingVOSpy).getName(); + Mockito.doReturn("DisplayText").when(diskOfferingVOSpy).getDisplayText(); + Mockito.doReturn(1).when(diskOfferingVOSpy).getSortKey(); + Mockito.doReturn(true).when(diskOfferingVOSpy).getDisplayOffering(); + + 
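+        // the cmd mock is left unstubbed here, so every getter returns null and no
+        // offering attribute should be overwritten by the call below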
configurationManagerImplSpy.updateDiskOfferingIfCmdAttributeNotNull(diskOfferingVOSpy, updateDiskOfferingCmdMock); + + Assert.assertNotEquals(updateDiskOfferingCmdMock.getDiskOfferingName(), diskOfferingVOSpy.getName()); + Assert.assertNotEquals(updateDiskOfferingCmdMock.getDisplayText(), diskOfferingVOSpy.getDisplayText()); + Assert.assertNotEquals(updateDiskOfferingCmdMock.getSortKey(), (Integer) diskOfferingVOSpy.getSortKey()); + Assert.assertNotEquals(updateDiskOfferingCmdMock.getDisplayOffering(), diskOfferingVOSpy.getDisplayOffering()); + } + + @Test + public void updateDiskOfferingDetailsDomainIdsTestDifferentDomainIdsDiskOfferingDetailsAddDomainIds() { + List detailsVO = new ArrayList<>(); + Long diskOfferingId = validId; + + configurationManagerImplSpy.updateDiskOfferingDetailsDomainIds(detailsVO, searchCriteriaDiskOfferingDetailMock, diskOfferingId, filteredDomainIds, existingDomainIds); + + for (int i = 0; i < detailsVO.size(); i++) { + Assert.assertEquals(filteredDomainIds.get(i), (Long) Long.parseLong(detailsVO.get(i).getValue())); + } + } + + @Test + public void checkDomainAdminUpdateOfferingRestrictionsTestDifferentZoneIdsThrowException() { + Assert.assertThrows(InvalidParameterValueException.class, + () -> configurationManagerImplSpy.checkDomainAdminUpdateOfferingRestrictions(diskOfferingMock, userMock, filteredZoneIds, emptyExistingZoneIds, existingDomainIds, filteredDomainIds)); + } + + @Test + public void checkDomainAdminUpdateOfferingRestrictionsTestEmptyExistingDomainIdsThrowException() { + Assert.assertThrows(InvalidParameterValueException.class, + () -> configurationManagerImplSpy.checkDomainAdminUpdateOfferingRestrictions(diskOfferingMock, userMock, filteredZoneIds, existingZoneIds, emptyExistingDomainIds, filteredDomainIds)); + } + + @Test + public void checkDomainAdminUpdateOfferingRestrictionsTestEmptyFilteredDomainIdsThrowException() { + Assert.assertThrows(InvalidParameterValueException.class, + () -> configurationManagerImplSpy.checkDomainAdminUpdateOfferingRestrictions(diskOfferingMock, userMock, filteredZoneIds, existingZoneIds, existingDomainIds, emptyFilteredDomainIds)); + } + + @Test + public void getAccountNonChildDomainsTestValidValuesReturnChildDomains() { + Mockito.doReturn(null).when(updateDiskOfferingCmdMock).getSortKey(); + List nonChildDomains = configurationManagerImplSpy.getAccountNonChildDomains(diskOfferingMock, accountMock, userMock, updateDiskOfferingCmdMock, existingDomainIds); + + for (int i = 0; i < existingDomainIds.size(); i++) { + Assert.assertEquals(existingDomainIds.get(i), nonChildDomains.get(i)); + } + } + + @Test + public void getAccountNonChildDomainsTestAllDomainsAreChildDomainsReturnEmptyList() { + for (Long existingDomainId : existingDomainIds) { + Mockito.when(domainDaoMock.isChildDomain(accountMock.getDomainId(), existingDomainId)).thenReturn(true); + } + + List nonChildDomains = configurationManagerImplSpy.getAccountNonChildDomains(diskOfferingMock, accountMock, userMock, updateDiskOfferingCmdMock, existingDomainIds); + + Assert.assertTrue(nonChildDomains.isEmpty()); + } + + @Test + public void getAccountNonChildDomainsTestNotNullCmdAttributeThrowException() { + Mockito.doReturn("name").when(updateDiskOfferingCmdMock).getDiskOfferingName(); + + Assert.assertThrows(InvalidParameterValueException.class, () -> configurationManagerImplSpy.getAccountNonChildDomains(diskOfferingMock, accountMock, userMock, updateDiskOfferingCmdMock, existingDomainIds)); + } + + @Test + public void 
checkIfDomainIsChildDomainTestNonChildDomainThrowException() { + Mockito.doReturn(false).when(domainDaoMock).isChildDomain(Mockito.anyLong(), Mockito.anyLong()); + Mockito.doReturn(domainMock).when(entityManagerMock).findById(Domain.class, 1L); + + Assert.assertThrows(InvalidParameterValueException.class, () -> configurationManagerImplSpy.checkIfDomainIsChildDomain(diskOfferingMock, accountMock, userMock, filteredDomainIds)); + } } diff --git a/server/src/test/java/com/cloud/deploy/DeploymentPlanningManagerImplTest.java b/server/src/test/java/com/cloud/deploy/DeploymentPlanningManagerImplTest.java index ea04a09cf92c..3afd3dc4a958 100644 --- a/server/src/test/java/com/cloud/deploy/DeploymentPlanningManagerImplTest.java +++ b/server/src/test/java/com/cloud/deploy/DeploymentPlanningManagerImplTest.java @@ -252,7 +252,9 @@ public void testSetUp() { Mockito.when(template.isDeployAsIs()).thenReturn(false); Mockito.when(templateDao.findById(Mockito.anyLong())).thenReturn(template); - VMInstanceVO vm = new VMInstanceVO(); + VMInstanceVO vm = Mockito.mock(VMInstanceVO.class); + Mockito.when(vm.getType()).thenReturn(Type.Instance); + Mockito.when(vm.getLastHostId()).thenReturn(null); Mockito.when(vmProfile.getVirtualMachine()).thenReturn(vm); Mockito.when(vmProfile.getId()).thenReturn(instanceId); diff --git a/server/src/test/java/com/cloud/storage/VolumeApiServiceImplTest.java b/server/src/test/java/com/cloud/storage/VolumeApiServiceImplTest.java index 4f32d2531a4c..043f62fc8030 100644 --- a/server/src/test/java/com/cloud/storage/VolumeApiServiceImplTest.java +++ b/server/src/test/java/com/cloud/storage/VolumeApiServiceImplTest.java @@ -25,6 +25,7 @@ import static org.mockito.Mockito.doNothing; import static org.mockito.Mockito.doThrow; import static org.mockito.Mockito.lenient; +import static org.mockito.Mockito.mock; import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; @@ -39,6 +40,7 @@ import org.apache.cloudstack.acl.ControlledEntity; import org.apache.cloudstack.acl.SecurityChecker.AccessType; +import org.apache.cloudstack.api.command.user.volume.CheckAndRepairVolumeCmd; import org.apache.cloudstack.api.command.user.volume.CreateVolumeCmd; import org.apache.cloudstack.api.command.user.volume.DetachVolumeCmd; import org.apache.cloudstack.api.command.user.volume.MigrateVolumeCmd; @@ -1642,7 +1644,6 @@ public void testStoragePoolCompatibilityAndAllowEncryptedVolumeMigrationForPower Mockito.when(_diskOfferingDao.findById(1L)).thenReturn(diskOffering); StoragePoolVO srcStoragePoolVOMock = Mockito.mock(StoragePoolVO.class); - StoragePool destPool = Mockito.mock(StoragePool.class); PrimaryDataStore dataStore = Mockito.mock(PrimaryDataStore.class); Mockito.when(vol.getPassphraseId()).thenReturn(1L); @@ -1657,4 +1658,166 @@ public void testStoragePoolCompatibilityAndAllowEncryptedVolumeMigrationForPower // test passed } } + + @Test + public void testValidationsForCheckVolumeAPI() { + VolumeVO volume = mock(VolumeVO.class); + + AccountVO account = new AccountVO("admin", 1L, "networkDomain", Account.Type.NORMAL, "uuid"); + UserVO user = new UserVO(1, "testuser", "password", "firstname", "lastName", "email", "timezone", UUID.randomUUID().toString(), User.Source.UNKNOWN); + CallContext.register(user, account); + + lenient().doNothing().when(accountManagerMock).checkAccess(any(Account.class), any(AccessType.class), any(Boolean.class), any(ControlledEntity.class)); + + when(volume.getInstanceId()).thenReturn(1L); + UserVmVO vm = 
mock(UserVmVO.class); + when(userVmDaoMock.findById(1L)).thenReturn(vm); + when(vm.getState()).thenReturn(State.Stopped); + when(volume.getState()).thenReturn(Volume.State.Ready); + when(volume.getId()).thenReturn(1L); + when(volumeDaoMock.getHypervisorType(1L)).thenReturn(HypervisorType.KVM); + when(volume.getFormat()).thenReturn(Storage.ImageFormat.QCOW2); + + volumeApiServiceImpl.validationsForCheckVolumeOperation(volume); + } + + @Test(expected = InvalidParameterValueException.class) + public void testValidationsForCheckVolumeAPIWithRunningVM() { + VolumeVO volume = mock(VolumeVO.class); + + AccountVO account = new AccountVO("admin", 1L, "networkDomain", Account.Type.NORMAL, "uuid"); + UserVO user = new UserVO(1, "testuser", "password", "firstname", "lastName", "email", "timezone", UUID.randomUUID().toString(), User.Source.UNKNOWN); + CallContext.register(user, account); + + lenient().doNothing().when(accountManagerMock).checkAccess(any(Account.class), any(AccessType.class), any(Boolean.class), any(ControlledEntity.class)); + + when(volume.getInstanceId()).thenReturn(1L); + UserVmVO vm = mock(UserVmVO.class); + when(userVmDaoMock.findById(1L)).thenReturn(vm); + when(vm.getState()).thenReturn(State.Running); + + volumeApiServiceImpl.validationsForCheckVolumeOperation(volume); + } + + @Test(expected = InvalidParameterValueException.class) + public void testValidationsForCheckVolumeAPIWithNonexistedVM() { + VolumeVO volume = mock(VolumeVO.class); + + AccountVO account = new AccountVO("admin", 1L, "networkDomain", Account.Type.NORMAL, "uuid"); + UserVO user = new UserVO(1, "testuser", "password", "firstname", "lastName", "email", "timezone", UUID.randomUUID().toString(), User.Source.UNKNOWN); + CallContext.register(user, account); + + lenient().doNothing().when(accountManagerMock).checkAccess(any(Account.class), any(AccessType.class), any(Boolean.class), any(ControlledEntity.class)); + + when(volume.getInstanceId()).thenReturn(1L); + when(userVmDaoMock.findById(1L)).thenReturn(null); + + volumeApiServiceImpl.validationsForCheckVolumeOperation(volume); + } + + @Test(expected = InvalidParameterValueException.class) + public void testValidationsForCheckVolumeAPIWithAllocatedVolume() { + VolumeVO volume = mock(VolumeVO.class); + + AccountVO account = new AccountVO("admin", 1L, "networkDomain", Account.Type.NORMAL, "uuid"); + UserVO user = new UserVO(1, "testuser", "password", "firstname", "lastName", "email", "timezone", UUID.randomUUID().toString(), User.Source.UNKNOWN); + CallContext.register(user, account); + + lenient().doNothing().when(accountManagerMock).checkAccess(any(Account.class), any(AccessType.class), any(Boolean.class), any(ControlledEntity.class)); + + when(volume.getInstanceId()).thenReturn(1L); + UserVmVO vm = mock(UserVmVO.class); + when(userVmDaoMock.findById(1L)).thenReturn(vm); + when(vm.getState()).thenReturn(State.Stopped); + when(volume.getState()).thenReturn(Volume.State.Allocated); + + volumeApiServiceImpl.validationsForCheckVolumeOperation(volume); + } + + @Test(expected = InvalidParameterValueException.class) + public void testValidationsForCheckVolumeAPIWithNonKVMhypervisor() { + VolumeVO volume = mock(VolumeVO.class); + + AccountVO account = new AccountVO("admin", 1L, "networkDomain", Account.Type.NORMAL, "uuid"); + UserVO user = new UserVO(1, "testuser", "password", "firstname", "lastName", "email", "timezone", UUID.randomUUID().toString(), User.Source.UNKNOWN); + CallContext.register(user, account); + + 
lenient().doNothing().when(accountManagerMock).checkAccess(any(Account.class), any(AccessType.class), any(Boolean.class), any(ControlledEntity.class)); + + when(volume.getInstanceId()).thenReturn(1L); + UserVmVO vm = mock(UserVmVO.class); + when(userVmDaoMock.findById(1L)).thenReturn(vm); + when(vm.getState()).thenReturn(State.Stopped); + when(volume.getState()).thenReturn(Volume.State.Ready); + when(volume.getId()).thenReturn(1L); + when(volumeDaoMock.getHypervisorType(1L)).thenReturn(HypervisorType.VMware); + + volumeApiServiceImpl.validationsForCheckVolumeOperation(volume); + } + + @Test + public void testCheckAndRepairVolume() throws ResourceAllocationException { + + CheckAndRepairVolumeCmd cmd = mock(CheckAndRepairVolumeCmd.class); + when(cmd.getId()).thenReturn(1L); + when(cmd.getRepair()).thenReturn(null); + + VolumeVO volume = mock(VolumeVO.class); + when(volumeDaoMock.findById(1L)).thenReturn(volume); + + AccountVO account = new AccountVO("admin", 1L, "networkDomain", Account.Type.NORMAL, "uuid"); + UserVO user = new UserVO(1, "testuser", "password", "firstname", "lastName", "email", "timezone", UUID.randomUUID().toString(), User.Source.UNKNOWN); + CallContext.register(user, account); + + lenient().doNothing().when(accountManagerMock).checkAccess(any(Account.class), any(AccessType.class), any(Boolean.class), any(ControlledEntity.class)); + + when(volume.getInstanceId()).thenReturn(null); + when(volume.getState()).thenReturn(Volume.State.Ready); + when(volume.getId()).thenReturn(1L); + when(volumeDaoMock.getHypervisorType(1L)).thenReturn(HypervisorType.KVM); + + VolumeInfo volumeInfo = mock(VolumeInfo.class); + when(volumeDataFactoryMock.getVolume(1L)).thenReturn(volumeInfo); + + String checkResult = "{\n" + + " \"image-end-offset\": 6442582016,\n" + + " \"total-clusters\": 163840,\n" + + " \"check-errors\": 0,\n" + + " \"leaks\": 124,\n" + + " \"allocated-clusters\": 98154,\n" + + " \"filename\": \"/var/lib/libvirt/images/26be20c7-b9d0-43f6-a76e-16c70737a0e0\",\n" + + " \"format\": \"qcow2\",\n" + + " \"fragmented-clusters\": 96135\n" + + "}"; + + String repairResult = null; + Pair result = new Pair<>(checkResult, repairResult); + when(volumeServiceMock.checkAndRepairVolume(volumeInfo)).thenReturn(result); + when(volume.getFormat()).thenReturn(Storage.ImageFormat.QCOW2); + + Pair finalresult = volumeApiServiceImpl.checkAndRepairVolume(cmd); + + Assert.assertEquals(result, finalresult); + } + + @Test(expected = InvalidParameterValueException.class) + public void testValidationsForCheckVolumeAPIWithInvalidVolumeFormat() { + VolumeVO volume = mock(VolumeVO.class); + AccountVO account = new AccountVO("admin", 1L, "networkDomain", Account.Type.NORMAL, "uuid"); + UserVO user = new UserVO(1, "testuser", "password", "firstname", "lastName", "email", "timezone", UUID.randomUUID().toString(), User.Source.UNKNOWN); + CallContext.register(user, account); + + lenient().doNothing().when(accountManagerMock).checkAccess(any(Account.class), any(AccessType.class), any(Boolean.class), any(ControlledEntity.class)); + + when(volume.getInstanceId()).thenReturn(1L); + UserVmVO vm = mock(UserVmVO.class); + when(userVmDaoMock.findById(1L)).thenReturn(vm); + when(vm.getState()).thenReturn(State.Stopped); + when(volume.getState()).thenReturn(Volume.State.Ready); + when(volume.getId()).thenReturn(1L); + when(volumeDaoMock.getHypervisorType(1L)).thenReturn(HypervisorType.KVM); + when(volume.getFormat()).thenReturn(Storage.ImageFormat.RAW); + + volumeApiServiceImpl.validationsForCheckVolumeOperation(volume); + 
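+        // expected to throw InvalidParameterValueException: RAW volumes carry no format
+        // metadata that a check could inspect, so only QCOW2 and VDI qualify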
} } diff --git a/server/src/test/java/com/cloud/user/AccountManagerImplTest.java b/server/src/test/java/com/cloud/user/AccountManagerImplTest.java index 6d9211dd526d..d98a4f8f0587 100644 --- a/server/src/test/java/com/cloud/user/AccountManagerImplTest.java +++ b/server/src/test/java/com/cloud/user/AccountManagerImplTest.java @@ -34,6 +34,7 @@ import com.cloud.vm.VMInstanceVO; import com.cloud.vm.snapshot.VMSnapshotVO; import org.apache.cloudstack.acl.SecurityChecker.AccessType; +import org.apache.cloudstack.api.command.admin.user.DeleteUserCmd; import org.apache.cloudstack.api.command.admin.user.GetUserKeysCmd; import org.apache.cloudstack.api.command.admin.user.UpdateUserCmd; import org.apache.cloudstack.api.response.UserTwoFactorAuthenticationSetupResponse; @@ -48,6 +49,7 @@ import org.junit.runner.RunWith; import org.mockito.InOrder; import org.mockito.Mock; +import org.mockito.MockedStatic; import org.mockito.Mockito; import org.mockito.junit.MockitoJUnitRunner; @@ -91,6 +93,12 @@ public class AccountManagerImplTest extends AccountManagetImplTestBase { @Mock private Account accountMock; + @Mock + private DomainVO domainVoMock; + + @Mock + private AccountVO accountVoMock; + @Mock private ProjectAccountVO projectAccountVO; @Mock @@ -190,6 +198,42 @@ public void deleteUserAccountCleanup() { Mockito.verify(_accountDao, Mockito.atLeastOnce()).markForCleanup(Mockito.eq(42l)); } + @Test (expected = InvalidParameterValueException.class) + public void deleteUserTestIfUserIdIsEqualToCallerIdShouldThrowException() { + try (MockedStatic callContextMocked = Mockito.mockStatic(CallContext.class)) { + DeleteUserCmd cmd = Mockito.mock(DeleteUserCmd.class); + CallContext callContextMock = Mockito.mock(CallContext.class); + callContextMocked.when(CallContext::current).thenReturn(callContextMock); + + Mockito.doReturn(userVoMock).when(callContextMock).getCallingUser(); + Mockito.doReturn(1L).when(cmd).getId(); + Mockito.doReturn(userVoMock).when(accountManagerImpl).getValidUserVO(Mockito.anyLong()); + Mockito.doReturn(accountVoMock).when(_accountDao).findById(Mockito.anyLong()); + Mockito.doReturn(domainVoMock).when(_domainDao).findById(Mockito.anyLong()); + Mockito.doReturn(1L).when(userVoMock).getId(); + + accountManagerImpl.deleteUser(cmd); + } + } + + @Test + public void deleteUserTestIfUserIdIsNotEqualToCallerIdShouldNotThrowException() { + try (MockedStatic callContextMocked = Mockito.mockStatic(CallContext.class)) { + DeleteUserCmd cmd = Mockito.mock(DeleteUserCmd.class); + CallContext callContextMock = Mockito.mock(CallContext.class); + callContextMocked.when(CallContext::current).thenReturn(callContextMock); + + Mockito.doReturn(userVoMock).when(callContextMock).getCallingUser(); + Mockito.doReturn(1L).when(cmd).getId(); + Mockito.doReturn(userVoMock).when(accountManagerImpl).getValidUserVO(Mockito.anyLong()); + Mockito.doReturn(accountVoMock).when(_accountDao).findById(Mockito.anyLong()); + Mockito.doReturn(2L).when(userVoMock).getId(); + + Mockito.doNothing().when(accountManagerImpl).checkAccountAndAccess(Mockito.any(), Mockito.any()); + accountManagerImpl.deleteUser(cmd); + } + } + @Test public void testAuthenticateUser() throws UnknownHostException { Pair successAuthenticationPair = new Pair<>(true, null); diff --git a/services/console-proxy/server/src/main/java/com/cloud/consoleproxy/ConsoleProxyNoVncClient.java b/services/console-proxy/server/src/main/java/com/cloud/consoleproxy/ConsoleProxyNoVncClient.java index 38e5a3d41043..e89984b1749d 100644 --- 
a/services/console-proxy/server/src/main/java/com/cloud/consoleproxy/ConsoleProxyNoVncClient.java +++ b/services/console-proxy/server/src/main/java/com/cloud/consoleproxy/ConsoleProxyNoVncClient.java @@ -16,13 +16,6 @@ // under the License. package com.cloud.consoleproxy; -import org.apache.commons.lang3.StringUtils; -import org.apache.logging.log4j.Logger; -import org.apache.logging.log4j.LogManager; -import org.eclipse.jetty.websocket.api.Session; -import org.eclipse.jetty.websocket.api.WebSocketException; -import org.eclipse.jetty.websocket.api.extensions.Frame; - import java.awt.Image; import java.io.IOException; import java.net.URI; @@ -30,6 +23,13 @@ import java.nio.charset.StandardCharsets; import java.util.List; +import org.apache.commons.lang3.StringUtils; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.eclipse.jetty.websocket.api.Session; +import org.eclipse.jetty.websocket.api.WebSocketException; +import org.eclipse.jetty.websocket.api.extensions.Frame; + import com.cloud.consoleproxy.vnc.NoVncClient; public class ConsoleProxyNoVncClient implements ConsoleProxyClient { @@ -115,11 +115,6 @@ public void run() { updateFrontEndActivityTime(); } connectionAlive = session.isOpen(); - try { - Thread.sleep(1); - } catch (InterruptedException e) { - logger.error("Error on sleep for vnc over websocket", e); - } } else if (client.isVncOverNioSocket()) { byte[] bytesArr; int nextBytes = client.getNextBytes(); @@ -140,6 +135,11 @@ public void run() { connectionAlive = false; } } + try { + Thread.sleep(1); + } catch (InterruptedException e) { + logger.error("Error on sleep for vnc sessions", e); + } } logger.info(String.format("Connection with client [%s] is dead.", clientId)); } catch (IOException e) { diff --git a/systemvm/agent/noVNC/keymaps/generate-language-keymaps.py b/systemvm/agent/noVNC/keymaps/generate-language-keymaps.py index 145891b8af49..4a88a05ef0d4 100755 --- a/systemvm/agent/noVNC/keymaps/generate-language-keymaps.py +++ b/systemvm/agent/noVNC/keymaps/generate-language-keymaps.py @@ -95,7 +95,7 @@ def generate_js_file(keymap_file): js_config.append(" * layout : %s\n" % layout) js_config.append(" */\n") js_config.append("export default {\n") - for keycode in dict(sorted(result_mappings.items(), key=lambda item: int(item[0]))): + for keycode in dict(sorted(list(result_mappings.items()), key=lambda item: int(item[0]))): js_config.append("%10s : \"%s\",\n" % ("\"" + str(keycode) + "\"", result_mappings[keycode].strip())) js_config.append("}\n") for line in js_config: diff --git a/systemvm/agent/packages/packages.ini b/systemvm/agent/packages/packages.ini new file mode 100644 index 000000000000..5693338dac4e --- /dev/null +++ b/systemvm/agent/packages/packages.ini @@ -0,0 +1,11 @@ +[python-is-python3] +debian_os=11 +package_name=python-is-python3 +file_name=python-is-python3_3.9.2-1_all.deb +conflicted_packages=python-is-python2 + +[python3-netaddr] +debian_os=11 +package_name=python3-netaddr +file_name=python3-netaddr_0.7.19-5_all.deb +conflicted_packages= diff --git a/systemvm/agent/packages/python-is-python3_3.9.2-1_all.deb b/systemvm/agent/packages/python-is-python3_3.9.2-1_all.deb new file mode 100644 index 000000000000..8e7af9538532 Binary files /dev/null and b/systemvm/agent/packages/python-is-python3_3.9.2-1_all.deb differ diff --git a/systemvm/agent/packages/python3-netaddr_0.7.19-5_all.deb b/systemvm/agent/packages/python3-netaddr_0.7.19-5_all.deb new file mode 100644 index 000000000000..17acf31bd93b Binary files 
/dev/null and b/systemvm/agent/packages/python3-netaddr_0.7.19-5_all.deb differ diff --git a/systemvm/debian/etc/apache2/vhost.template b/systemvm/debian/etc/apache2/vhost.template index 626705c8b225..7f6a51460995 100644 --- a/systemvm/debian/etc/apache2/vhost.template +++ b/systemvm/debian/etc/apache2/vhost.template @@ -93,7 +93,7 @@ # Enable/Disable SSL for this virtual host. SSLEngine on SSLProtocol TLSv1.2 - SSLCipherSuite @SECLEVEL=1:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-AES256-GCM-SHA384:DHE-RSA-AES128-GCM-SHA256:DHE-DSS-AES128-GCM-SHA256:kEDH+AESGCM:ECDHE-RSA-AES128-SHA256:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-SHA:ECDHE-ECDSA-AES128-SHA:ECDHE-RSA-AES256-SHA384:ECDHE-ECDSA-AES256-SHA384:ECDHE-RSA-AES256-SHA:ECDHE-ECDSA-AES256-SHA:DHE-RSA-AES128-SHA256:DHE-RSA-AES128-SHA:DHE-DSS-AES128-SHA256:DHE-RSA-AES256-SHA256:DHE-DSS-AES256-SHA:DHE-RSA-AES256-SHA:AES128-GCM-SHA256:AES256-GCM-SHA384:AES128-SHA256:AES256-SHA256:AES128-SHA:AES256-SHA:AES:CAMELLIA:DES-CBC3-SHA:!aNULL:!eNULL:!EXPORT:!DES:!RC4:!MD5:!PSK:!aECDH:!EDH-DSS-DES-CBC3-SHA:!EDH-RSA-DES-CBC3-SHA:!KRB5-DES-CBC3-SHA + SSLCipherSuite @SECLEVEL=0:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-AES256-GCM-SHA384:DHE-RSA-AES128-GCM-SHA256:DHE-DSS-AES128-GCM-SHA256:kEDH+AESGCM:ECDHE-RSA-AES128-SHA256:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-SHA:ECDHE-ECDSA-AES128-SHA:ECDHE-RSA-AES256-SHA384:ECDHE-ECDSA-AES256-SHA384:ECDHE-RSA-AES256-SHA:ECDHE-ECDSA-AES256-SHA:DHE-RSA-AES128-SHA256:DHE-RSA-AES128-SHA:DHE-DSS-AES128-SHA256:DHE-RSA-AES256-SHA256:DHE-DSS-AES256-SHA:DHE-RSA-AES256-SHA:AES128-GCM-SHA256:AES256-GCM-SHA384:AES128-SHA256:AES256-SHA256:AES128-SHA:AES256-SHA:AES:CAMELLIA:DES-CBC3-SHA:!aNULL:!eNULL:!EXPORT:!DES:!RC4:!MD5:!PSK:!aECDH:!EDH-DSS-DES-CBC3-SHA:!EDH-RSA-DES-CBC3-SHA:!KRB5-DES-CBC3-SHA SSLHonorCipherOrder on # A self-signed (snakeoil) certificate can be created by installing diff --git a/systemvm/debian/etc/vpcdnsmasq.conf b/systemvm/debian/etc/vpcdnsmasq.conf index 0850167a297e..c1fc18ce1a67 100644 --- a/systemvm/debian/etc/vpcdnsmasq.conf +++ b/systemvm/debian/etc/vpcdnsmasq.conf @@ -135,7 +135,7 @@ expand-hosts # of valid alternatives, so we will give examples of each. Note that # IP addresses DO NOT have to be in the range given above, they just # need to be on the same network. 
The order of the parameters in these -# do not matter, it's permissble to give name,adddress and MAC in any order +# do not matter, it's permissble to give name,address and MAC in any order # Always allocate the host with ethernet address 11:22:33:44:55:66 # The IP address 192.168.0.60 diff --git a/systemvm/debian/opt/cloud/bin/baremetal-vr.py b/systemvm/debian/opt/cloud/bin/baremetal-vr.py index 862775a3b605..e1de9291a125 100755 --- a/systemvm/debian/opt/cloud/bin/baremetal-vr.py +++ b/systemvm/debian/opt/cloud/bin/baremetal-vr.py @@ -59,8 +59,8 @@ def __call__(self, is_exception=True): err = [] err.append('failed to execute shell command: %s' % self.cmd) err.append('return code: %s' % self.process.returncode) - err.append('stdout: %s' % self.stdout) - err.append('stderr: %s' % self.stderr) + err.append('stdout: %s' % self.stdout.decode()) + err.append('stderr: %s' % self.stderr.decode()) raise Exception('\n'.join(err)) self.return_code = self.process.returncode diff --git a/systemvm/debian/opt/cloud/bin/configure.py b/systemvm/debian/opt/cloud/bin/configure.py index c261293af0fb..9dcef7eeaa26 100755 --- a/systemvm/debian/opt/cloud/bin/configure.py +++ b/systemvm/debian/opt/cloud/bin/configure.py @@ -21,8 +21,9 @@ import os import re import sys -import urllib -import urllib2 +import urllib.request +import urllib.parse +import urllib.error import time import copy @@ -41,9 +42,12 @@ from cs.CsStaticRoutes import CsStaticRoutes from cs.CsVpcGuestNetwork import CsVpcGuestNetwork -ICMPV6_TYPE_ANY = "{ destination-unreachable, packet-too-big, time-exceeded, parameter-problem, echo-request, echo-reply, mld-listener-query, mld-listener-report, mld-listener-done, nd-router-solicit, nd-router-advert, nd-neighbor-solicit, nd-neighbor-advert, nd-redirect, router-renumbering }" +ICMPV6_TYPE_ANY = "{ destination-unreachable, packet-too-big, time-exceeded, parameter-problem, \ + echo-request, echo-reply, mld-listener-query, mld-listener-report, mld-listener-done, \ + nd-router-solicit, nd-router-advert, nd-neighbor-solicit, nd-neighbor-advert, nd-redirect, router-renumbering }" TCP_UDP_PORT_ANY = "{ 0-65535 }" + def removeUndesiredCidrs(cidrs, version): version_char = ":" if version == 4: @@ -61,15 +65,17 @@ def removeUndesiredCidrs(cidrs, version): return cidrs return None + def appendStringIfNotEmpty(s1, s2): if s2: - if type(s2) != str: + if not isinstance(s2, str): s2 = str(s2) if s1: return s1 + " " + s2 return s2 return s1 + class CsPassword(CsDataBag): TOKEN_FILE = "/tmp/passwdsrvrtoken" @@ -107,10 +113,10 @@ def __update(self, vm_ip, password): if proc.find(): url = "http://%s:8080/" % server_ip payload = {"ip": vm_ip, "password": password, "token": token} - data = urllib.urlencode(payload) - request = urllib2.Request(url, data=data, headers={"DomU_Request": "save_password"}) + data = urllib.parse.urlencode(payload).encode() + request = urllib.request.Request(url, data=data, headers={"DomU_Request": "save_password"}) try: - resp = urllib2.urlopen(request, data) + resp = urllib.request.urlopen(request, data) logging.debug("Update password server result: http:%s, content:%s" % (resp.code, resp.read())) except Exception as e: logging.error("Failed to update password server due to: %s" % e) @@ -165,15 +171,15 @@ def add_rule(self): icmp_type = '' rule = self.rule icmp_type = "any" - if "icmp_type" in self.rule.keys() and self.rule['icmp_type'] != -1: + if "icmp_type" in list(self.rule.keys()) and self.rule['icmp_type'] != -1: icmp_type = self.rule['icmp_type'] - if "icmp_code" in self.rule.keys() 
and rule['icmp_code'] != -1: + if "icmp_code" in list(self.rule.keys()) and rule['icmp_code'] != -1: icmp_type = "%s/%s" % (self.rule['icmp_type'], self.rule['icmp_code']) rnge = '' - if "first_port" in self.rule.keys() and \ + if "first_port" in list(self.rule.keys()) and \ self.rule['first_port'] == self.rule['last_port']: rnge = " --dport %s " % self.rule['first_port'] - if "first_port" in self.rule.keys() and \ + if "first_port" in list(self.rule.keys()) and \ self.rule['first_port'] != self.rule['last_port']: rnge = " --dport %s:%s" % (rule['first_port'], rule['last_port']) @@ -278,14 +284,14 @@ def __init__(self, obj, config): self.device = obj['device'] self.ip = obj['nic_ip'] self.ip6_cidr = None - if "nic_ip6_cidr" in obj.keys(): + if "nic_ip6_cidr" in list(obj.keys()): self.ip6_cidr = obj['nic_ip6_cidr'] self.netmask = obj['nic_netmask'] self.config = config self.cidr = "%s/%s" % (self.ip, self.netmask) - if "ingress_rules" in obj.keys(): + if "ingress_rules" in list(obj.keys()): self.ingress = obj['ingress_rules'] - if "egress_rules" in obj.keys(): + if "egress_rules" in list(obj.keys()): self.egress = obj['egress_rules'] self.fw = config.get_fw() self.ipv6_acl = config.get_ipv6_acl() @@ -308,9 +314,9 @@ def __process_ip6(self, direction, rule_list): self.ipv6_acl.insert(0, {'type': "chain", 'chain': chain}) for rule in rule_list: cidr = rule['cidr'] - if cidr != None and cidr != "": + if cidr is not None and cidr != "": cidr = removeUndesiredCidrs(cidr, 4) - if cidr == None or cidr == "": + if cidr is None or cidr == "": continue addr = "" if cidr: @@ -352,7 +358,7 @@ def __process_ip6(self, direction, rule_list): proto = "%s dport %s" % (proto, port) action = "drop" - if 'allowed' in rule.keys() and rule['allowed']: + if 'allowed' in list(rule.keys()) and rule['allowed']: action = "accept" rstr = addr @@ -376,9 +382,9 @@ def process(self, direction, rule_list, base): for i in rule_list: ruleData = copy.copy(i) cidr = ruleData['cidr'] - if cidr != None and cidr != "": + if cidr is not None and cidr != "": cidr = removeUndesiredCidrs(cidr, 6) - if cidr == None or cidr == "": + if cidr is None or cidr == "": continue ruleData['cidr'] = cidr r = self.AclRule(direction, self, ruleData, self.config, count) @@ -411,9 +417,9 @@ def init_vpc(self, direction, acl, rule, config): self.type = rule['type'] self.icmp_type = "any" self.protocol = self.type - if "icmp_type" in rule.keys() and rule['icmp_type'] != -1: + if "icmp_type" in list(rule.keys()) and rule['icmp_type'] != -1: self.icmp_type = rule['icmp_type'] - if "icmp_code" in rule.keys() and rule['icmp_code'] != -1: + if "icmp_code" in list(rule.keys()) and rule['icmp_code'] != -1: self.icmp_type = "%s/%s" % (self.icmp_type, rule['icmp_code']) if self.type == "protocol": if rule['protocol'] == 41: @@ -421,11 +427,11 @@ def init_vpc(self, direction, acl, rule, config): self.protocol = rule['protocol'] self.action = "DROP" self.dport = "" - if 'allowed' in rule.keys() and rule['allowed']: + if 'allowed' in list(rule.keys()) and rule['allowed']: self.action = "ACCEPT" - if 'first_port' in rule.keys(): + if 'first_port' in list(rule.keys()): self.dport = "-m %s --dport %s" % (self.protocol, rule['first_port']) - if 'last_port' in rule.keys() and self.dport and \ + if 'last_port' in list(rule.keys()) and self.dport and \ rule['last_port'] != rule['first_port']: self.dport = "%s:%s" % (self.dport, rule['last_port']) @@ -488,7 +494,7 @@ def process(self): continue rule = self.dbag[item] - if chains_added == False: + if chains_added is 
False: guest_cidr = rule['guest_ip6_cidr'] parent_chain = "fw_forward" chain = "fw_chain_egress" @@ -640,23 +646,26 @@ def __createfile(self, ip, folder, file, data): fh = open(dest, "w") self.__exflock(fh) if data is not None: - fh.write(data) + if isinstance(data, str): + fh.write(data) + elif isinstance(data, bytes): + fh.write(data.decode()) else: fh.write("") self.__unflock(fh) fh.close() - os.chmod(dest, 0644) + os.chmod(dest, 0o644) if folder == "metadata" or folder == "meta-data": try: - os.makedirs(metamanifestdir, 0755) + os.makedirs(metamanifestdir, 0o755) except OSError as e: # error 17 is already exists, we do it this way for concurrency if e.errno != 17: - print "failed to make directories " + metamanifestdir + " due to :" + e.strerror + print("failed to make directories " + metamanifestdir + " due to :" + e.strerror) sys.exit(1) if os.path.exists(metamanifest): - fh = open(metamanifest, "r+a") + fh = open(metamanifest, "a+") self.__exflock(fh) if file not in fh.read(): fh.write(file + '\n') @@ -670,17 +679,17 @@ def __createfile(self, ip, folder, file, data): fh.close() if os.path.exists(metamanifest): - os.chmod(metamanifest, 0644) + os.chmod(metamanifest, 0o644) def __htaccess(self, ip, folder, file): entry = "RewriteRule ^" + file + "$ ../" + folder + "/%{REMOTE_ADDR}/" + file + " [L,NC,QSA]" htaccessFolder = "/var/www/html/latest" htaccessFile = htaccessFolder + "/.htaccess" - CsHelper.mkdir(htaccessFolder, 0755, True) + CsHelper.mkdir(htaccessFolder, 0o755, True) if os.path.exists(htaccessFile): - fh = open(htaccessFile, "r+a") + fh = open(htaccessFile, "a+") self.__exflock(fh) if entry not in fh.read(): fh.write(entry + '\n') @@ -699,11 +708,11 @@ def __htaccess(self, ip, folder, file): htaccessFile = htaccessFolder+"/.htaccess" try: - os.makedirs(htaccessFolder, 0755) + os.makedirs(htaccessFolder, 0o755) except OSError as e: # error 17 is already exists, we do it this way for sake of concurrency if e.errno != 17: - print "failed to make directories " + htaccessFolder + " due to :" + e.strerror + print("failed to make directories " + htaccessFolder + " due to :" + e.strerror) sys.exit(1) fh = open(htaccessFile, "w") @@ -717,7 +726,7 @@ def __htaccess(self, ip, folder, file): htaccessFolder = "/var/www/html/latest" htaccessFile = htaccessFolder + "/.htaccess" - fh = open(htaccessFile, "r+a") + fh = open(htaccessFile, "a+") self.__exflock(fh) if entry not in fh.read(): fh.write(entry + '\n') @@ -734,7 +743,7 @@ def __exflock(self, file): try: flock(file, LOCK_EX) except IOError as e: - print "failed to lock file" + file.name + " due to : " + e.strerror + print("failed to lock file" + file.name + " due to : " + e.strerror) sys.exit(1) # FIXME return True @@ -742,7 +751,7 @@ def __unflock(self, file): try: flock(file, LOCK_UN) except IOError as e: - print "failed to unlock file" + file.name + " due to : " + e.strerror + print("failed to unlock file" + file.name + " due to : " + e.strerror) sys.exit(1) # FIXME return True @@ -838,9 +847,9 @@ def configure_ipsec(self, obj): file.addeq(" authby=secret") file.addeq(" keyexchange=%s" % ikeversion) file.addeq(" ike=%s" % ikepolicy) - file.addeq(" ikelifetime=%s" % self.convert_sec_to_h(obj['ike_lifetime'])) + file.addeq(" ikelifetime=%s" % self.convert_sec_to_min(obj['ike_lifetime'])) file.addeq(" esp=%s" % esppolicy) - file.addeq(" lifetime=%s" % self.convert_sec_to_h(obj['esp_lifetime'])) + file.addeq(" lifetime=%s" % self.convert_sec_to_min(obj['esp_lifetime'])) file.addeq(" keyingtries=2") file.addeq(" auto=route") if 
'encap' not in obj: @@ -868,9 +877,9 @@ def configure_ipsec(self, obj): # This will load the new config CsHelper.execute("ipsec reload") - os.chmod(vpnsecretsfile, 0400) + os.chmod(vpnsecretsfile, 0o400) - for i in xrange(3): + for i in range(3): done = True for peeridx in range(0, len(peerlistarr)): # Check for the proper connection and subnet @@ -891,9 +900,9 @@ def configure_ipsec(self, obj): ipinsubnet = '.'.join(octets) CsHelper.execute("timeout 5 ping -c 3 %s" % ipinsubnet) - def convert_sec_to_h(self, val): - hrs = int(val) / 3600 - return "%sh" % hrs + def convert_sec_to_min(self, val): + mins = int(val / 60) + return "%sm" % mins class CsVpnUser(CsDataBag): @@ -1383,7 +1392,7 @@ def main(argv): databag_map.pop("guest_network") def execDatabag(key, db): - if key not in db.keys() or 'executor' not in db[key]: + if key not in list(db.keys()) or 'executor' not in db[key]: logging.warn("Unable to find config or executor(s) for the databag type %s" % key) return for executor in db[key]['executor']: @@ -1397,10 +1406,10 @@ def execIptables(config): if json_type == "cmd_line": logging.debug("cmd_line.json changed. All other files will be processed as well.") - for key in databag_map.keys(): + for key in list(databag_map.keys()): execDatabag(key, databag_map) execIptables(config) - elif json_type in databag_map.keys(): + elif json_type in list(databag_map.keys()): execDatabag(json_type, databag_map) if databag_map[json_type]['process_iptables']: execIptables(config) @@ -1411,5 +1420,6 @@ def execIptables(config): red.set() return 0 + if __name__ == "__main__": main(sys.argv) diff --git a/systemvm/debian/opt/cloud/bin/cs/CsAddress.py b/systemvm/debian/opt/cloud/bin/cs/CsAddress.py index 3cb782daf7ab..1b3d1a763873 100755 --- a/systemvm/debian/opt/cloud/bin/cs/CsAddress.py +++ b/systemvm/debian/opt/cloud/bin/cs/CsAddress.py @@ -19,11 +19,11 @@ from netaddr import IPAddress, IPNetwork import subprocess import time -import CsHelper -from CsDatabag import CsDataBag -from CsApp import CsApache, CsDnsmasq, CsPasswdSvc -from CsRoute import CsRoute -from CsRule import CsRule +from . 
import CsHelper +from .CsDatabag import CsDataBag +from .CsApp import CsApache, CsDnsmasq, CsPasswdSvc +from .CsRoute import CsRoute +from .CsRule import CsRule VRRP_TYPES = ['guest'] @@ -321,7 +321,7 @@ def configure(self, address): logging.info("Configuring address %s on device %s", self.ip(), self.dev) cmd = "ip addr add dev %s %s brd +" % (self.dev, self.ip()) CsHelper.execute(cmd) - cmd = "ifconfig %s mtu %s" % (self.dev, self.mtu()) + cmd = "ifconfig %s mtu %s" % (self.dev, self.mtu()) CsHelper.execute(cmd) except Exception as e: logging.info("Exception occurred ==> %s" % e) @@ -364,7 +364,7 @@ def post_configure(self, address): else: # once we start processing public ip's we need to verify there # is a default route and add if needed - if(self.cl.get_gateway()): + if self.cl.get_gateway(): route.add_defaultroute(self.cl.get_gateway()) if self.config.is_router() and self.cl.get_ip6gateway(): @@ -556,7 +556,7 @@ def fw_vpcrouter(self): "-A POSTROUTING -o %s -j SNAT --to-source %s" % (self.dev, self.address['public_ip'])]) if self.get_gateway() == self.get_ip_address(): - for inf, addresses in self.config.address().dbag.iteritems(): + for inf, addresses in self.config.address().dbag.items(): if not inf.startswith("eth"): continue for address in addresses: @@ -625,7 +625,7 @@ def post_config_change(self, method): if self.config.is_vpc(): if self.get_type() in ["public"] and "gateway" in self.address and self.address["gateway"] and self.address["gateway"] != "None": route.add_route(self.dev, self.address["gateway"]) - for inf, addresses in self.config.address().dbag.iteritems(): + for inf, addresses in self.config.address().dbag.items(): if not inf.startswith("eth"): continue for address in addresses: @@ -709,7 +709,7 @@ def list(self): self.iplist[cidr] = self.dev def configured(self): - if self.address['cidr'] in self.iplist.keys(): + if self.address['cidr'] in list(self.iplist.keys()): return True return False @@ -738,7 +738,7 @@ def getDevice(self): return self.dev def hasIP(self, ip): - return ip in self.address.values() + return ip in list(self.address.values()) def arpPing(self): cmd = "arping -c 1 -I %s -A -U -s %s %s" % ( @@ -749,7 +749,7 @@ def arpPing(self): # Delete any ips that are configured but not in the bag def compare(self, bag): - if len(self.iplist) > 0 and (self.dev not in bag.keys() or len(bag[self.dev]) == 0): + if len(self.iplist) > 0 and (self.dev not in list(bag.keys()) or len(bag[self.dev]) == 0): # Remove all IPs on this device logging.info( "Will remove all configured addresses on device %s", self.dev) @@ -760,13 +760,13 @@ def compare(self, bag): # This condition should not really happen but did :) # It means an apache file got orphaned after a guest network address # was deleted - if len(self.iplist) == 0 and (self.dev not in bag.keys() or len(bag[self.dev]) == 0): + if len(self.iplist) == 0 and (self.dev not in list(bag.keys()) or len(bag[self.dev]) == 0): app = CsApache(self) app.remove() for ip in self.iplist: found = False - if self.dev in bag.keys(): + if self.dev in list(bag.keys()): for address in bag[self.dev]: self.setAddress(address) if (self.hasIP(ip) or self.is_guest_gateway(address, ip)) and address["add"]: @@ -799,7 +799,7 @@ def delete(self, ip): remove = [] if ip == "all": logging.info("Removing addresses from device %s", self.dev) - remove = self.iplist.keys() + remove = list(self.iplist.keys()) else: remove.append(ip) for ip in remove: diff --git a/systemvm/debian/opt/cloud/bin/cs/CsApp.py b/systemvm/debian/opt/cloud/bin/cs/CsApp.py index 
123171a09c0d..064771184c1e 100755 --- a/systemvm/debian/opt/cloud/bin/cs/CsApp.py +++ b/systemvm/debian/opt/cloud/bin/cs/CsApp.py @@ -16,8 +16,8 @@ # specific language governing permissions and limitations # under the License. import os -from CsFile import CsFile -import CsHelper +from .CsFile import CsFile +from . import CsHelper class CsApp: diff --git a/systemvm/debian/opt/cloud/bin/cs/CsConfig.py b/systemvm/debian/opt/cloud/bin/cs/CsConfig.py index eaed71732d8d..bfc5c1349983 100755 --- a/systemvm/debian/opt/cloud/bin/cs/CsConfig.py +++ b/systemvm/debian/opt/cloud/bin/cs/CsConfig.py @@ -16,8 +16,8 @@ # specific language governing permissions and limitations # under the License. -from CsDatabag import CsCmdLine, CsGuestNetwork -from CsAddress import CsAddress +from .CsDatabag import CsCmdLine, CsGuestNetwork +from .CsAddress import CsAddress import logging diff --git a/systemvm/debian/opt/cloud/bin/cs/CsDatabag.py b/systemvm/debian/opt/cloud/bin/cs/CsDatabag.py index f2de92304ea0..a6e84bb0b8cc 100755 --- a/systemvm/debian/opt/cloud/bin/cs/CsDatabag.py +++ b/systemvm/debian/opt/cloud/bin/cs/CsDatabag.py @@ -33,7 +33,7 @@ def __init__(self, key, config=None): self.config = config def dump(self): - print self.dbag + print(self.dbag) def get_bag(self): return self.dbag @@ -151,7 +151,7 @@ def get_router_password(self): else: passwd = "%s-%s" % (self.get_vpccidr(), self.get_router_id()) md5 = hashlib.md5() - md5.update(passwd) + md5.update(passwd.encode()) return md5.hexdigest() def get_gateway(self): @@ -191,7 +191,7 @@ class CsGuestNetwork(CsDataBag): """ Get guestnetwork config parameters """ def get_dev_data(self, devname): - if devname in self.dbag and type(self.dbag[devname]) == list and len(self.dbag[devname]) > 0: + if devname in self.dbag and isinstance(self.dbag[devname], list) and len(self.dbag[devname]) > 0: return self.dbag[devname][0] return {} @@ -223,7 +223,7 @@ def get_router_ip6prelen(self, devname=None): if devname: return self.__get_device_router_ip6prelen(devname) else: - for key in self.dbag.keys(): + for key in list(self.dbag.keys()): ip6prelen = self.__get_device_router_ip6prelen(key) if ip6prelen: return ip6prelen @@ -240,7 +240,7 @@ def get_router_ip6gateway(self, devname=None): if devname: return self.__get_device_router_ip6gateway(devname) else: - for key in self.dbag.keys(): + for key in list(self.dbag.keys()): ip6gateway = self.__get_device_router_ip6gateway(key) if ip6gateway: return ip6gateway diff --git a/systemvm/debian/opt/cloud/bin/cs/CsDhcp.py b/systemvm/debian/opt/cloud/bin/cs/CsDhcp.py index d653093a9f28..0c4302270488 100755 --- a/systemvm/debian/opt/cloud/bin/cs/CsDhcp.py +++ b/systemvm/debian/opt/cloud/bin/cs/CsDhcp.py @@ -14,13 +14,13 @@ # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. -import CsHelper +from . 
import CsHelper import logging import os from netaddr import * from random import randint import json -from CsGuestNetwork import CsGuestNetwork +from .CsGuestNetwork import CsGuestNetwork from cs.CsDatabag import CsDataBag from cs.CsFile import CsFile from cs.CsAddress import CsIP diff --git a/systemvm/debian/opt/cloud/bin/cs/CsFile.py b/systemvm/debian/opt/cloud/bin/cs/CsFile.py index 2ee631a89d60..bad9cd9537ad 100755 --- a/systemvm/debian/opt/cloud/bin/cs/CsFile.py +++ b/systemvm/debian/opt/cloud/bin/cs/CsFile.py @@ -70,7 +70,7 @@ def commit(self): def dump(self): for line in self.new_config: - print line + print(line) def addeq(self, string): """ Update a line in a file of the form token=something @@ -153,7 +153,7 @@ def searchString(self, search, ignoreLinesStartWith): logging.debug("Searching for %s string " % search) for index, line in enumerate(self.new_config): - print ' line = ' + line + print(' line = ' + line) if line.lstrip().startswith(ignoreLinesStartWith): continue if search in line: diff --git a/systemvm/debian/opt/cloud/bin/cs/CsGuestNetwork.py b/systemvm/debian/opt/cloud/bin/cs/CsGuestNetwork.py index 9a94dc66360d..a934862c224e 100755 --- a/systemvm/debian/opt/cloud/bin/cs/CsGuestNetwork.py +++ b/systemvm/debian/opt/cloud/bin/cs/CsGuestNetwork.py @@ -15,7 +15,7 @@ # specific language governing permissions and limitations # under the License. from merge import DataBag -import CsHelper +from . import CsHelper class CsGuestNetwork: @@ -27,7 +27,7 @@ def __init__(self, device, config): db.load() dbag = db.getDataBag() self.config = config - if device in dbag.keys() and len(dbag[device]) != 0: + if device in list(dbag.keys()) and len(dbag[device]) != 0: self.data = dbag[device][0] else: self.guest = False diff --git a/systemvm/debian/opt/cloud/bin/cs/CsHelper.py b/systemvm/debian/opt/cloud/bin/cs/CsHelper.py index b7db1b3fe164..926ea5f10d0d 100755 --- a/systemvm/debian/opt/cloud/bin/cs/CsHelper.py +++ b/systemvm/debian/opt/cloud/bin/cs/CsHelper.py @@ -87,7 +87,7 @@ def mkdir(name, mode, fatal): except OSError as e: if e.errno != 17: print("failed to make directories " + name + " due to :" + e.strerror) - if(fatal): + if fatal: sys.exit(1) @@ -115,8 +115,8 @@ def get_device_info(): list = [] for i in execute("ip addr show |grep -v secondary"): vals = i.strip().lstrip().rstrip().split() - if re.search('[0-9]:',vals[0]): - to={} + if re.search('[0-9]:', vals[0]): + to = {} to['mtu'] = vals[4] list.append(to) @@ -124,7 +124,7 @@ def get_device_info(): if len(list) > 0: to = list.pop(len(list)-1) else: - to={} + to = {} to['ip'] = vals[1] to['dev'] = vals[-1] to['network'] = IPNetwork(to['ip']) @@ -198,7 +198,7 @@ def execute(command): returncode = 0 logging.debug("Command [%s] has the result [%s]" % (command, result)) - return result.splitlines() + return result.decode().splitlines() except subprocess.CalledProcessError as e: logging.error(e) returncode = e.returncode diff --git a/systemvm/debian/opt/cloud/bin/cs/CsLoadBalancer.py b/systemvm/debian/opt/cloud/bin/cs/CsLoadBalancer.py index a45d57efe790..a92f06b18701 100755 --- a/systemvm/debian/opt/cloud/bin/cs/CsLoadBalancer.py +++ b/systemvm/debian/opt/cloud/bin/cs/CsLoadBalancer.py @@ -18,9 +18,9 @@ import os.path import re from cs.CsDatabag import CsDataBag -from CsProcess import CsProcess -from CsFile import CsFile -import CsHelper +from .CsProcess import CsProcess +from .CsFile import CsFile +from . 
import CsHelper HAPROXY_CONF_T = "/etc/haproxy/haproxy.cfg.new" HAPROXY_CONF_P = "/etc/haproxy/haproxy.cfg" @@ -30,9 +30,9 @@ class CsLoadBalancer(CsDataBag): """ Manage Load Balancer entries """ def process(self): - if "config" not in self.dbag.keys(): + if "config" not in list(self.dbag.keys()): return - if 'configuration' not in self.dbag['config'][0].keys(): + if 'configuration' not in list(self.dbag['config'][0].keys()): return config = self.dbag['config'][0]['configuration'] file1 = CsFile(HAPROXY_CONF_T) diff --git a/systemvm/debian/opt/cloud/bin/cs/CsMonitor.py b/systemvm/debian/opt/cloud/bin/cs/CsMonitor.py index 5a0ff5b114c4..5f02dedb6130 100755 --- a/systemvm/debian/opt/cloud/bin/cs/CsMonitor.py +++ b/systemvm/debian/opt/cloud/bin/cs/CsMonitor.py @@ -16,7 +16,7 @@ # under the License. import logging from cs.CsDatabag import CsDataBag -from CsFile import CsFile +from .CsFile import CsFile import json MON_CONFIG = "/etc/monitor.conf" diff --git a/systemvm/debian/opt/cloud/bin/cs/CsNetfilter.py b/systemvm/debian/opt/cloud/bin/cs/CsNetfilter.py index a034034dc8bd..c753350eaf52 100755 --- a/systemvm/debian/opt/cloud/bin/cs/CsNetfilter.py +++ b/systemvm/debian/opt/cloud/bin/cs/CsNetfilter.py @@ -15,8 +15,8 @@ # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. -import CsHelper -from CsDatabag import CsCmdLine +from . import CsHelper +from .CsDatabag import CsCmdLine import logging @@ -28,7 +28,7 @@ def __init__(self): self.count = {} def add(self, table, chain): - if table not in self.chain.keys(): + if table not in list(self.chain.keys()): self.chain.setdefault(table, []).append(chain) else: self.chain[table].append(chain) @@ -40,7 +40,7 @@ def add_rule(self, chain): self.count[chain] += 1 def get(self, table): - if table not in self.chain.keys(): + if table not in list(self.chain.keys()): return {} return self.chain[table] @@ -51,7 +51,7 @@ def last(self): return self.last_added def has_chain(self, table, chain): - if table not in self.chain.keys(): + if table not in list(self.chain.keys()): return False if chain not in self.chain[table]: return False @@ -179,7 +179,7 @@ def compare(self, list): # For now raising the log. # TODO: Need to fix in the framework. if ret.returncode != 0: - error = ret.communicate()[0] + error = ret.communicate()[0].decode() logging.debug("iptables command got failed ... 
continuing") ruleSet.add(tupledFw) self.chain.add_rule(rule_chain) @@ -223,14 +223,15 @@ def delete(self, rule): self.rules[:] = [x for x in self.rules if not x == rule] def add_ip6_chain(self, address_family, table, chain, hook, action): - chain_policy = "" - if hook: - chain_policy = "type filter hook %s priority 0;" % hook - if chain_policy and action: - chain_policy = "%s policy %s;" % (chain_policy, action) - CsHelper.execute("nft add chain %s %s %s '{ %s }'" % (address_family, table, chain, chain_policy)) - if hook == "input" or hook == "output": - CsHelper.execute("nft add rule %s %s %s icmpv6 type { echo-request, echo-reply, nd-neighbor-solicit, nd-router-advert, nd-neighbor-advert } accept" % (address_family, table, chain)) + chain_policy = "" + if hook: + chain_policy = "type filter hook %s priority 0;" % hook + if chain_policy and action: + chain_policy = "%s policy %s;" % (chain_policy, action) + CsHelper.execute("nft add chain %s %s %s '{ %s }'" % (address_family, table, chain, chain_policy)) + if hook == "input" or hook == "output": + CsHelper.execute("nft add rule %s %s %s icmpv6 type { echo-request, echo-reply, \ + nd-neighbor-solicit, nd-router-advert, nd-neighbor-advert } accept" % (address_family, table, chain)) def apply_ip6_rules(self, rules, type): if len(rules) == 0: @@ -238,14 +239,14 @@ def apply_ip6_rules(self, rules, type): address_family = 'ip6' table = 'ip6_firewall' default_chains = [ - { "chain": "fw_input", "hook": "input", "action": "drop"}, - { "chain": "fw_forward", "hook": "forward", "action": "accept"} + {"chain": "fw_input", "hook": "input", "action": "drop"}, + {"chain": "fw_forward", "hook": "forward", "action": "accept"} ] if type == "acl": table = 'ip6_acl' default_chains = [ - { "chain": "acl_input", "hook": "input", "action": "drop" }, - { "chain": "acl_forward", "hook": "forward", "action": "accept"} + {"chain": "acl_input", "hook": "input", "action": "drop"}, + {"chain": "acl_forward", "hook": "forward", "action": "accept"} ] CsHelper.execute("nft add table %s %s" % (address_family, table)) for chain in default_chains: @@ -287,7 +288,7 @@ def mark_seen(self): self.seen = True def __convert_to_dict(self, rule): - rule = unicode(rule.lstrip()) + rule = str(rule.lstrip()) rule = rule.replace('! -', '!_-') rule = rule.replace('-p all', '') rule = rule.replace(' ', ' ') @@ -298,8 +299,8 @@ def __convert_to_dict(self, rule): rule = rule.replace('-m state', '-m2 state') rule = rule.replace('ESTABLISHED,RELATED', 'RELATED,ESTABLISHED') bits = rule.split(' ') - rule = dict(zip(bits[0::2], bits[1::2])) - if "-A" in rule.keys(): + rule = dict(list(zip(bits[0::2], bits[1::2]))) + if "-A" in list(rule.keys()): self.chain = rule["-A"] return rule @@ -334,7 +335,7 @@ def to_str(self, delete=False): '--to-source', '--to-destination', '--mark'] str = '' for k in order: - if k in self.rule.keys(): + if k in list(self.rule.keys()): printable = k.replace('-m2', '-m') printable = printable.replace('!_-', '! 
-') if delete: @@ -351,7 +352,7 @@ def __eq__(self, rule): return False if rule.get_chain() != self.get_chain(): return False - if len(rule.get_rule().items()) != len(self.get_rule().items()): + if len(list(rule.get_rule().items())) != len(list(self.get_rule().items())): return False common = set(rule.get_rule().items()) & set(self.get_rule().items()) if len(common) != len(rule.get_rule()): diff --git a/systemvm/debian/opt/cloud/bin/cs/CsProcess.py b/systemvm/debian/opt/cloud/bin/cs/CsProcess.py index 4a64807c86f6..1a0f35241f52 100755 --- a/systemvm/debian/opt/cloud/bin/cs/CsProcess.py +++ b/systemvm/debian/opt/cloud/bin/cs/CsProcess.py @@ -17,7 +17,7 @@ # under the License. import os import re -import CsHelper +from . import CsHelper import logging diff --git a/systemvm/debian/opt/cloud/bin/cs/CsRedundant.py b/systemvm/debian/opt/cloud/bin/cs/CsRedundant.py index f8928dc7fd94..7acf0a531300 100755 --- a/systemvm/debian/opt/cloud/bin/cs/CsRedundant.py +++ b/systemvm/debian/opt/cloud/bin/cs/CsRedundant.py @@ -32,13 +32,13 @@ # -------------------------------------------------------------------- # import os import logging -import CsHelper -from CsFile import CsFile -from CsProcess import CsProcess -from CsApp import CsPasswdSvc -from CsAddress import CsDevice -from CsRoute import CsRoute -from CsStaticRoutes import CsStaticRoutes +from . import CsHelper +from .CsFile import CsFile +from .CsProcess import CsProcess +from .CsApp import CsPasswdSvc +from .CsAddress import CsDevice +from .CsRoute import CsRoute +from .CsStaticRoutes import CsStaticRoutes import socket from time import sleep @@ -435,7 +435,7 @@ def _add_ipv6_to_interface(self, interface, ipv6): - public IPv6 for primary VR public NIC as its IPv6 gets lost on link down """ dev = '' - if dev == interface.get_device() or not ipv6 : + if dev == interface.get_device() or not ipv6: return dev = interface.get_device() command = "ip -6 address show %s | grep 'inet6 %s'" % (dev, ipv6) @@ -458,7 +458,7 @@ def _remove_ipv6_to_interface(self, interface, ipv6): - guest IPv6 gateway for primary VR guest NIC """ dev = '' - if dev == interface.get_device() or not ipv6 : + if dev == interface.get_device() or not ipv6: return dev = interface.get_device() command = "ip -6 address show %s | grep 'inet6 %s'" % (dev, ipv6) @@ -495,7 +495,6 @@ def _disable_radvd(self, dev): CsHelper.service("radvd", "disable") logging.info(CsHelper.execute("systemctl status radvd")) - def _add_ipv6_guest_gateway(self): """ Configure guest network gateway as IPv6 address for guest interface diff --git a/systemvm/debian/opt/cloud/bin/cs/CsRoute.py b/systemvm/debian/opt/cloud/bin/cs/CsRoute.py index d5df611df300..796ef5057229 100755 --- a/systemvm/debian/opt/cloud/bin/cs/CsRoute.py +++ b/systemvm/debian/opt/cloud/bin/cs/CsRoute.py @@ -15,7 +15,7 @@ # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. -import CsHelper +from . import CsHelper import logging diff --git a/systemvm/debian/opt/cloud/bin/cs/CsRule.py b/systemvm/debian/opt/cloud/bin/cs/CsRule.py index f1caa2989049..c28ea7b1ad60 100755 --- a/systemvm/debian/opt/cloud/bin/cs/CsRule.py +++ b/systemvm/debian/opt/cloud/bin/cs/CsRule.py @@ -15,7 +15,7 @@ # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. -import CsHelper +from . 
import CsHelper import logging diff --git a/systemvm/debian/opt/cloud/bin/cs/CsStaticRoutes.py b/systemvm/debian/opt/cloud/bin/cs/CsStaticRoutes.py index df98b2e03ce3..bcd669b6d454 100755 --- a/systemvm/debian/opt/cloud/bin/cs/CsStaticRoutes.py +++ b/systemvm/debian/opt/cloud/bin/cs/CsStaticRoutes.py @@ -18,8 +18,8 @@ # under the License. import logging -import CsHelper -from CsDatabag import CsDataBag +from . import CsHelper +from .CsDatabag import CsDataBag class CsStaticRoutes(CsDataBag): diff --git a/systemvm/debian/opt/cloud/bin/cs/CsVpcGuestNetwork.py b/systemvm/debian/opt/cloud/bin/cs/CsVpcGuestNetwork.py index e80f16e915ae..9e918f94a429 100755 --- a/systemvm/debian/opt/cloud/bin/cs/CsVpcGuestNetwork.py +++ b/systemvm/debian/opt/cloud/bin/cs/CsVpcGuestNetwork.py @@ -17,14 +17,15 @@ import logging import os.path from cs.CsDatabag import CsDataBag -from CsFile import CsFile -import CsHelper +from .CsFile import CsFile +from . import CsHelper VPC_PUBLIC_INTERFACE = "eth1" RADVD_CONF = "/etc/radvd.conf" RADVD_CONF_NEW = "/etc/radvd.conf.new" + class CsVpcGuestNetwork(CsDataBag): """ Manage Vpc Guest Networks """ @@ -53,13 +54,13 @@ def __disable_dad(self, device): CsHelper.execute("sysctl net.ipv6.conf." + device + ".use_tempaddr=0") def add_address_route(self, entry): - if 'router_guest_ip6' in entry.keys() and entry['router_guest_ip6']: + if 'router_guest_ip6' in list(entry.keys()) and entry['router_guest_ip6']: self.enable_ipv6(entry['device']) cidr_size = entry['router_guest_ip6_cidr'].split("/")[-1] full_addr = entry['router_guest_ip6_gateway'] + "/" + cidr_size if not CsHelper.execute("ip -6 addr show dev %s | grep -w %s" % (entry['device'], full_addr)): CsHelper.execute("ip -6 addr add %s dev %s" % (full_addr, entry['device'])) - if 'router_ip6' in entry.keys() and entry['router_ip6']: + if 'router_ip6' in list(entry.keys()) and entry['router_ip6']: self.__disable_dad(VPC_PUBLIC_INTERFACE) full_public_addr = entry['router_ip6'] + "/" + cidr_size if not CsHelper.execute("ip -6 addr show dev %s | grep -w %s" % (VPC_PUBLIC_INTERFACE, full_public_addr)): @@ -70,11 +71,11 @@ def add_address_route(self, entry): return def remove_address_route(self, entry): - if 'router_guest_ip6' in entry.keys() and entry['router_guest_ip6']: + if 'router_guest_ip6' in list(entry.keys()) and entry['router_guest_ip6']: cidr_size = entry['router_guest_ip6_cidr'].split("/")[-1] full_addr = entry['router_guest_ip6_gateway'] + "/" + cidr_size CsHelper.execute("ip -6 addr del %s dev %s" % (full_addr, entry['device'])) - if 'router_ip6' in entry.keys() and entry['router_ip6']: + if 'router_ip6' in list(entry.keys()) and entry['router_ip6']: full_public_addr = entry['router_ip6'] + "/" + cidr_size CsHelper.execute("ip -6 addr del %s dev %s" % (full_public_addr, VPC_PUBLIC_INTERFACE)) else: @@ -94,7 +95,7 @@ def enable_ipv6(self, device): self.__disable_dad(device) def add_radvd_conf(self, entry): - if 'router_guest_ip6' in entry.keys() and entry['router_guest_ip6']: + if 'router_guest_ip6' in list(entry.keys()) and entry['router_guest_ip6']: cidr_size = entry['router_guest_ip6_cidr'].split("/")[-1] full_addr = entry['router_guest_ip6_gateway'] + "/" + cidr_size self.conf.append("interface %s" % entry['device']) @@ -107,7 +108,7 @@ def add_radvd_conf(self, entry): self.conf.append(" AdvOnLink on;") self.conf.append(" AdvAutonomous on;") self.conf.append(" };") - if 'dns6' in entry.keys() and entry['dns6']: + if 'dns6' in list(entry.keys()) and entry['dns6']: for dns in entry['dns6'].split(","): 
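The list(...keys()) wrappers in the hunk above, and throughout the cs/ modules, come from the mechanical Python 2 to 3 conversion: .iteritems() is gone and .keys() now returns a view rather than a list, so the converter wraps every call in list() for safety. Strictly, the wrapper is only needed when the dict is mutated during iteration or a real list is required; a plain membership test needs no .keys() at all. An illustrative sketch (the dict contents here are made up, not taken from the databags):

```python
# Illustrative only -- not from the patch. The dict-API changes applied
# mechanically across these files.
entry = {"router_guest_ip6": "fd00::1", "dns6": "fd00::53,fd00::54"}

# Python 2 spellings that break on Python 3:
#   entry.iteritems()      -> AttributeError (method removed)
#   entry.keys().sort()    -> AttributeError (keys() is a view, not a list)

for key, value in entry.items():    # .iteritems() -> .items()
    print(key, value)

if "dns6" in entry:                 # same result as: "dns6" in list(entry.keys())
    servers = entry["dns6"].split(",")

snapshot = list(entry.keys())       # materialize only when mutating while iterating
for key in snapshot:
    if key.startswith("dns"):
        del entry[key]
```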
self.conf.append(" RDNSS %s" % dns) self.conf.append(" {") diff --git a/systemvm/debian/opt/cloud/bin/cs_dhcp.py b/systemvm/debian/opt/cloud/bin/cs_dhcp.py index 8aa388a10f97..cd6574fd9265 100755 --- a/systemvm/debian/opt/cloud/bin/cs_dhcp.py +++ b/systemvm/debian/opt/cloud/bin/cs_dhcp.py @@ -24,16 +24,16 @@ def merge(dbag, data): # This seems desirable .... if "add" in data and data['add'] is False and "ipv4_address" in data: if data['ipv4_address'] in dbag: - del(dbag[data['ipv4_address']]) + del dbag[data['ipv4_address']] else: remove_keys = set() - for key, entry in dbag.iteritems(): + for key, entry in dbag.items(): if key != 'id' and entry['mac_address'] == data['mac_address']: remove_keys.add(key) break for remove_key in remove_keys: - del(dbag[remove_key]) + del dbag[remove_key] dbag[data['ipv4_address']] = data diff --git a/systemvm/debian/opt/cloud/bin/cs_firewallrules.py b/systemvm/debian/opt/cloud/bin/cs_firewallrules.py index 1357c6c44402..474681ce1848 100755 --- a/systemvm/debian/opt/cloud/bin/cs_firewallrules.py +++ b/systemvm/debian/opt/cloud/bin/cs_firewallrules.py @@ -25,8 +25,8 @@ def merge(dbag, data): for rule in data['rules']: id = str(rule['id']) if rule['revoked']: - if id in dbagc.keys(): - del(dbagc[id]) - elif id not in dbagc.keys(): + if id in list(dbagc.keys()): + del dbagc[id] + elif id not in list(dbagc.keys()): dbagc[id] = rule return dbagc diff --git a/systemvm/debian/opt/cloud/bin/cs_forwardingrules.py b/systemvm/debian/opt/cloud/bin/cs_forwardingrules.py index 974c468e8dce..ec66979df272 100755 --- a/systemvm/debian/opt/cloud/bin/cs_forwardingrules.py +++ b/systemvm/debian/opt/cloud/bin/cs_forwardingrules.py @@ -39,7 +39,7 @@ def merge(dbag, rules): dbag[source_ip] = [newrule] elif rules["type"] == "forwardrules": index = -1 - if source_ip in dbag.keys(): + if source_ip in list(dbag.keys()): for forward in dbag[source_ip]: if ruleCompare(forward, newrule): index = dbag[source_ip].index(forward) @@ -51,15 +51,15 @@ def merge(dbag, rules): dbag[source_ip] = [newrule] else: if rules["type"] == "staticnatrules": - if source_ip in dbag.keys(): + if source_ip in list(dbag.keys()): del dbag[source_ip] elif rules["type"] == "forwardrules": - if source_ip in dbag.keys(): + if source_ip in list(dbag.keys()): index = -1 for forward in dbag[source_ip]: if ruleCompare(forward, newrule): index = dbag[source_ip].index(forward) - print "removing index %s" % str(index) + print("removing index %s" % str(index)) if not index == -1: del dbag[source_ip][index] diff --git a/systemvm/debian/opt/cloud/bin/cs_guestnetwork.py b/systemvm/debian/opt/cloud/bin/cs_guestnetwork.py index 954346985f18..c6988a5d945e 100755 --- a/systemvm/debian/opt/cloud/bin/cs_guestnetwork.py +++ b/systemvm/debian/opt/cloud/bin/cs_guestnetwork.py @@ -28,11 +28,11 @@ def merge(dbag, gn): device_to_die = dbag[device][0] try: dbag[device].remove(device_to_die) - except ValueError, e: - print "[WARN] cs_guestnetwork.py :: Error occurred removing item from databag. => %s" % device_to_die - del(dbag[device]) + except ValueError as e: + print("[WARN] cs_guestnetwork.py :: Error occurred removing item from databag. 
=> %s" % device_to_die) + del dbag[device] else: - del(dbag[device]) + del dbag[device] else: dbag.setdefault(device, []).append(gn) diff --git a/systemvm/debian/opt/cloud/bin/cs_ip.py b/systemvm/debian/opt/cloud/bin/cs_ip.py index fbe7629970a9..817d937eeafe 100755 --- a/systemvm/debian/opt/cloud/bin/cs_ip.py +++ b/systemvm/debian/opt/cloud/bin/cs_ip.py @@ -57,7 +57,7 @@ def merge(dbag, ip): ip['network'] = str(ipo.network) + '/' + str(ipo.prefixlen) if 'mtu' in ip: ip['mtu'] = str(ip['mtu']) - if 'nw_type' not in ip.keys(): + if 'nw_type' not in list(ip.keys()): ip['nw_type'] = 'public' else: ip['nw_type'] = ip['nw_type'].lower() diff --git a/systemvm/debian/opt/cloud/bin/cs_remoteaccessvpn.py b/systemvm/debian/opt/cloud/bin/cs_remoteaccessvpn.py index dff05bd28145..ea657230b2bc 100755 --- a/systemvm/debian/opt/cloud/bin/cs_remoteaccessvpn.py +++ b/systemvm/debian/opt/cloud/bin/cs_remoteaccessvpn.py @@ -20,8 +20,8 @@ def merge(dbag, vpn): key = vpn['vpn_server_ip'] op = vpn['create'] - if key in dbag.keys() and not op: - del(dbag[key]) + if key in list(dbag.keys()) and not op: + del dbag[key] else: dbag[key] = vpn return dbag diff --git a/systemvm/debian/opt/cloud/bin/cs_site2sitevpn.py b/systemvm/debian/opt/cloud/bin/cs_site2sitevpn.py index 3fa8414a7ab1..1f64aa778601 100755 --- a/systemvm/debian/opt/cloud/bin/cs_site2sitevpn.py +++ b/systemvm/debian/opt/cloud/bin/cs_site2sitevpn.py @@ -20,8 +20,8 @@ def merge(dbag, vpn): key = vpn['peer_gateway_ip'] op = vpn['create'] - if key in dbag.keys() and not op: - del(dbag[key]) + if key in list(dbag.keys()) and not op: + del dbag[key] else: dbag[key] = vpn return dbag diff --git a/systemvm/debian/opt/cloud/bin/cs_vpnusers.py b/systemvm/debian/opt/cloud/bin/cs_vpnusers.py index 3bef1fec239a..4a29cccdefbb 100755 --- a/systemvm/debian/opt/cloud/bin/cs_vpnusers.py +++ b/systemvm/debian/opt/cloud/bin/cs_vpnusers.py @@ -22,26 +22,26 @@ def merge(dbag, data): dbagc = copy.deepcopy(dbag) - print dbag - print data + print(dbag) + print(data) if "vpn_users" not in data: return dbagc # remove previously deleted user from the dict - for user in dbagc.keys(): + for user in list(dbagc.keys()): if user == 'id': continue userrec = dbagc[user] add = userrec['add'] if not add: - del(dbagc[user]) + del dbagc[user] for user in data['vpn_users']: username = user['user'] add = user['add'] - if username not in dbagc.keys(): + if username not in list(dbagc.keys()): dbagc[username] = user - elif username in dbagc.keys() and not add: + elif username in list(dbagc.keys()) and not add: dbagc[username] = user return dbagc diff --git a/systemvm/debian/opt/cloud/bin/diagnostics.py b/systemvm/debian/opt/cloud/bin/diagnostics.py index 737b12206db8..019710be6870 100755 --- a/systemvm/debian/opt/cloud/bin/diagnostics.py +++ b/systemvm/debian/opt/cloud/bin/diagnostics.py @@ -34,8 +34,8 @@ def run_cmd(command): return_code = 1 finally: - print('%s&&' % stdout.strip()) - print('%s&&' % stderr.strip()) + print('%s&&' % stdout.decode().strip()) + print('%s&&' % stderr.decode().strip()) print('%s' % return_code) diff --git a/systemvm/debian/opt/cloud/bin/filesystem_writable_check.py b/systemvm/debian/opt/cloud/bin/filesystem_writable_check.py index eac7d9c75536..4b2e2cbe533e 100644 --- a/systemvm/debian/opt/cloud/bin/filesystem_writable_check.py +++ b/systemvm/debian/opt/cloud/bin/filesystem_writable_check.py @@ -28,17 +28,17 @@ def check_filesystem(): readOnly1 = bool(stat1.f_flag & ST_RDONLY) if (readOnly1): - print "Read-only file system : monitor results (/root) file system is 
mounted as read-only" + print("Read-only file system : monitor results (/root) file system is mounted as read-only") exit(1) stat2 = os.statvfs('/var/cache/cloud') readOnly2 = bool(stat2.f_flag & ST_RDONLY) if (readOnly2): - print "Read-only file system : config info (/var/cache/cloud) file system is mounted as read-only" + print("Read-only file system : config info (/var/cache/cloud) file system is mounted as read-only") exit(1) - print "file system is writable" + print("file system is writable") exit(0) diff --git a/systemvm/debian/opt/cloud/bin/get_diagnostics_files.py b/systemvm/debian/opt/cloud/bin/get_diagnostics_files.py index b95dfb5420c1..ac61cb2797f4 100755 --- a/systemvm/debian/opt/cloud/bin/get_diagnostics_files.py +++ b/systemvm/debian/opt/cloud/bin/get_diagnostics_files.py @@ -65,7 +65,7 @@ def zip_files(files): cleanup(files_from_shell_commands) generate_retrieved_files_txt(zf, files_found_list, files_not_found_list) zf.close() - print zf_name + print(zf_name) def get_cmd(script): @@ -102,7 +102,7 @@ def execute_shell_script(script): p = sp.Popen(cmd, shell=True, stdout=sp.PIPE, stderr=sp.PIPE) stdout, stderr = p.communicate() return_code = p.returncode - if return_code is 0: + if return_code == 0: f.write(stdout) else: f.write(stderr) @@ -129,9 +129,9 @@ def generate_retrieved_files_txt(zip_file, files_found, files_not_found): try: with open(output_file, 'wb', 0) as man: for i in files_found: - man.write(i + '\n') + man.write((i + '\n').encode()) for j in files_not_found: - man.write(j + 'File Not Found!!\n') + man.write((j + ' File Not Found!!\n').encode()) zip_file.write(output_file, output_file) finally: cleanup_cmd = "rm -f %s" % output_file diff --git a/systemvm/debian/opt/cloud/bin/merge.py b/systemvm/debian/opt/cloud/bin/merge.py index 2409df0e7e2b..1d320395d74c 100755 --- a/systemvm/debian/opt/cloud/bin/merge.py +++ b/systemvm/debian/opt/cloud/bin/merge.py @@ -158,7 +158,7 @@ def processGuestNetwork(self, dbag): dp['mtu'] = str(d['mtu']) qf = QueueFile() qf.load({'ip_address': [dp], 'type': 'ips'}) - if 'domain_name' not in d.keys() or d['domain_name'] == '': + if 'domain_name' not in list(d.keys()) or d['domain_name'] == '': d['domain_name'] = "cloudnine.internal" return cs_guestnetwork.merge(dbag, d) @@ -227,7 +227,7 @@ def processCL(self, dbag): def processCLItem(self, num, nw_type): key = 'eth' + num + 'ip' dp = {} - if(key in self.qFile.data['cmd_line']): + if key in self.qFile.data['cmd_line']: dp['public_ip'] = self.qFile.data['cmd_line'][key] dp['netmask'] = self.qFile.data['cmd_line']['eth' + num + 'mask'] dp['source_nat'] = False @@ -236,7 +236,7 @@ def processCLItem(self, num, nw_type): if nw_type == "public": dp['gateway'] = self.qFile.data['cmd_line']['gateway'] else: - if('localgw' in self.qFile.data['cmd_line']): + if 'localgw' in self.qFile.data['cmd_line']: dp['gateway'] = self.qFile.data['cmd_line']['localgw'] else: dp['gateway'] = '' @@ -252,7 +252,7 @@ def processVmData(self, dbag): def process_ipaliases(self, dbag): nic_dev = None # Should be a way to deal with this better - for intf, data in dbag.items(): + for intf, data in list(dbag.items()): if intf == 'id': continue elif any([net['nw_type'] == 'guest' for net in data]): diff --git a/systemvm/debian/opt/cloud/bin/passwd_server_ip.py b/systemvm/debian/opt/cloud/bin/passwd_server_ip.py index 07884e1f5b5d..4d296176aa39 100755 --- a/systemvm/debian/opt/cloud/bin/passwd_server_ip.py +++ b/systemvm/debian/opt/cloud/bin/passwd_server_ip.py @@ -31,10 +31,10 @@ import sys import syslog import 
threading -import urlparse +import urllib.parse -from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer -from SocketServer import ThreadingMixIn #, ForkingMixIn +from http.server import BaseHTTPRequestHandler, HTTPServer +from socketserver import ThreadingMixIn #, ForkingMixIn passMap = {} @@ -55,7 +55,7 @@ def initToken(): with open(getTokenFile(), 'r') as f: secureToken = f.read() if not secureToken: - secureToken = binascii.hexlify(os.urandom(16)) + secureToken = binascii.hexlify(os.urandom(16)).decode() with open(getTokenFile(), 'w') as f: f.write(secureToken) @@ -64,7 +64,7 @@ def checkToken(token): def loadPasswordFile(): try: - with file(getPasswordFile()) as f: + with open(getPasswordFile()) as f: for line in f: if '=' not in line: continue key, value = line.strip().split('=', 1) @@ -75,11 +75,11 @@ def loadPasswordFile(): def savePasswordFile(): with lock: try: - with file(getPasswordFile(), 'w') as f: + with open(getPasswordFile(), 'w') as f: for ip in passMap: f.write('%s=%s\n' % (ip, passMap[ip])) f.close() - except IOError, e: + except IOError as e: syslog.syslog('serve_password: Unable to save to password file %s' % e) def getPassword(ip): @@ -117,7 +117,7 @@ def do_GET(self): self.wfile.write('saved_password') syslog.syslog('serve_password: requested password not found for %s' % clientAddress) else: - self.wfile.write(password) + self.wfile.write(password.encode()) syslog.syslog('serve_password: password sent to %s' % clientAddress) elif requestType == 'saved_password': removePassword(clientAddress) @@ -192,7 +192,7 @@ def serve(HandlerClass = PasswordRequestHandler, except KeyboardInterrupt: syslog.syslog('serve_password shutting down') passwordServer.socket.close() - except Exception, e: + except Exception as e: syslog.syslog('serve_password hit exception %s -- died' % e) passwordServer.socket.close() diff --git a/systemvm/debian/opt/cloud/bin/setup/cksnode.sh b/systemvm/debian/opt/cloud/bin/setup/cksnode.sh index 55bd4eaf31e7..aa5d466c96ab 100755 --- a/systemvm/debian/opt/cloud/bin/setup/cksnode.sh +++ b/systemvm/debian/opt/cloud/bin/setup/cksnode.sh @@ -72,3 +72,4 @@ setup_k8s_node() { } setup_k8s_node +. /opt/cloud/bin/setup/patch.sh && patch_sshd_config diff --git a/systemvm/debian/opt/cloud/bin/setup/consoleproxy.sh b/systemvm/debian/opt/cloud/bin/setup/consoleproxy.sh index 6d6b5d815bfb..596ad50ab500 100755 --- a/systemvm/debian/opt/cloud/bin/setup/consoleproxy.sh +++ b/systemvm/debian/opt/cloud/bin/setup/consoleproxy.sh @@ -45,3 +45,5 @@ setup_console_proxy() { } setup_console_proxy +# System VMs are patched during bootstrap +. /opt/cloud/bin/setup/patch.sh && patch_system_vm diff --git a/systemvm/debian/opt/cloud/bin/setup/dhcpsrvr.sh b/systemvm/debian/opt/cloud/bin/setup/dhcpsrvr.sh index 0f65f304f366..04919bc9bb76 100755 --- a/systemvm/debian/opt/cloud/bin/setup/dhcpsrvr.sh +++ b/systemvm/debian/opt/cloud/bin/setup/dhcpsrvr.sh @@ -52,3 +52,4 @@ then exit 1 fi setup_dhcpsrvr +. /opt/cloud/bin/setup/patch.sh && patch_router diff --git a/systemvm/debian/opt/cloud/bin/setup/elbvm.sh b/systemvm/debian/opt/cloud/bin/setup/elbvm.sh index 52132ccea96f..4a89021dc4de 100755 --- a/systemvm/debian/opt/cloud/bin/setup/elbvm.sh +++ b/systemvm/debian/opt/cloud/bin/setup/elbvm.sh @@ -41,3 +41,4 @@ then exit 1 fi setup_elbvm +. 
/opt/cloud/bin/setup/patch.sh && patch_router diff --git a/systemvm/debian/opt/cloud/bin/setup/ilbvm.sh b/systemvm/debian/opt/cloud/bin/setup/ilbvm.sh index a130674d1e8a..3fe1093f3f4a 100755 --- a/systemvm/debian/opt/cloud/bin/setup/ilbvm.sh +++ b/systemvm/debian/opt/cloud/bin/setup/ilbvm.sh @@ -44,3 +44,4 @@ then exit 1 fi setup_ilbvm +. /opt/cloud/bin/setup/patch.sh && patch_router diff --git a/systemvm/debian/opt/cloud/bin/setup/patch.sh b/systemvm/debian/opt/cloud/bin/setup/patch.sh new file mode 100755 index 000000000000..fc0f7d35e6fa --- /dev/null +++ b/systemvm/debian/opt/cloud/bin/setup/patch.sh @@ -0,0 +1,128 @@ +#!/bin/bash +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +PATH="/usr/local/sbin:/usr/local/bin:/sbin:/bin:/usr/sbin:/usr/bin" + +log_it() { + echo "$(date) $@" >> /var/log/cloud.log +} + +patch_sshd_config() { + if `! ssh -Q PubkeyAcceptedAlgorithms >/dev/null 2>&1` && `grep ^PubkeyAcceptedAlgorithms /etc/ssh/sshd_config >/dev/null`; then + # "PubkeyAcceptedAlgorithms=+ssh-rsa" is added to /etc/ssh/sshd_config in 4.20.0 systemvm template + # However, it is not supported in old systemvm templates + # If the system vm is created from an old systemvm template, remove it from /etc/ssh/sshd_config + # No need to restart ssh if it is running well + log_it "Removing PubkeyAcceptedAlgorithms=+ssh-rsa from /etc/ssh/sshd_config as it is not supported" + sed -i "/PubkeyAcceptedAlgorithms=+ssh-rsa/d" /etc/ssh/sshd_config + if ! systemctl is-active ssh > /dev/null; then + systemctl restart ssh + fi + elif `ssh -Q PubkeyAcceptedAlgorithms >/dev/null 2>&1` && `! 
grep ^PubkeyAcceptedAlgorithms /etc/ssh/sshd_config >/dev/null`; then + log_it "Adding PubkeyAcceptedAlgorithms=+ssh-rsa to sshd_config" + sed -i "/PubkeyAuthentication yes/aPubkeyAcceptedAlgorithms=+ssh-rsa" /etc/ssh/sshd_config + systemctl restart ssh + fi +} + +patch_router() { + local patchfile="/var/cache/cloud/agent.zip" + local logfile="/var/log/patchrouter.log" + rm /usr/local/cloud/systemvm -rf + mkdir -p /usr/local/cloud/systemvm + ls -lrt $patchfile + + log_it "Unzipping $patchfile" + echo "All" | unzip $patchfile -d /usr/local/cloud/systemvm >>$logfile 2>&1 + + find /usr/local/cloud/systemvm/ -name \*.sh | xargs chmod 555 + + patch_sshd_config + install_packages +} + +patch_system_vm() { + patch_sshd_config + install_packages +} + +install_packages() { + PACKAGES_FOLDER="/usr/local/cloud/systemvm/packages" + PACKAGES_INI="$PACKAGES_FOLDER/packages.ini" + declare -A package_properties + if [ -d $PACKAGES_FOLDER ] && [ -f $PACKAGES_INI ]; then + while read -r line; do + if [[ "$line" =~ ^(\[)(.*)(\])$ ]]; then + install_package + package_properties=() + else + key=$(echo $line | cut -d '=' -f1) + value=$(echo $line | cut -d '=' -f2) + if [ "$key" != "" ]; then + package_properties[$key]=$value + fi + fi + done <$PACKAGES_INI + fi + export DEBIAN_FRONTEND=noninteractive + install_package +} + +install_package() { + local os=${package_properties["debian_os"]} + local package=${package_properties["package_name"]} + local file=${package_properties["file_name"]} + if [ "$os" == "" ]; then + return + fi + local DEBIAN_RELEASE=$(lsb_release -rs) + if [ "$os" != "$DEBIAN_RELEASE" ]; then + log_it "Skipped the installation of package $package on Debian $DEBIAN_RELEASE as it can only be installed on Debian $os." + return + fi + + if [ -z "$package" ] || [ -z "$file" ]; then + log_it "Skipped the installation due to empty package or file name (package name: $package, file name: $file)." + return + fi + + dpkg-query -s $package >/dev/null 2>&1 + if [ $? -eq 0 ]; then + log_it "Skipped the installation as package $package has already been installed." + return + fi + + local conflicts=${package_properties["conflicted_packages"]} + if [ "$conflicts" != "" ]; then + log_it "Removing conflicted packages \"$conflicts\" before installing package $package" + apt remove -y "$conflicts" + if [ $? -eq 0 ]; then + log_it "Removed conflicted package(s) \"$conflicts\" before installing package $package" + else + log_it "Failed to remove conflicted package(s) \"$conflicts\" before installing package $package" + fi + fi + + PACKAGES_FOLDER="/usr/local/cloud/systemvm/packages" + log_it "Installing package $package from file $PACKAGES_FOLDER/$file" + dpkg -i $PACKAGES_FOLDER/$file + if [ $? -eq 0 ]; then + log_it "Installed package $package from file $PACKAGES_FOLDER/$file" + else + log_it "Failed to install package $package from file $PACKAGES_FOLDER/$file" + fi +} diff --git a/systemvm/debian/opt/cloud/bin/setup/router.sh b/systemvm/debian/opt/cloud/bin/setup/router.sh index 190ad60d9159..ee4972c6fa2b 100755 --- a/systemvm/debian/opt/cloud/bin/setup/router.sh +++ b/systemvm/debian/opt/cloud/bin/setup/router.sh @@ -101,3 +101,4 @@ then exit 1 fi setup_router +.
/opt/cloud/bin/setup/patch.sh && patch_router diff --git a/systemvm/debian/opt/cloud/bin/setup/secstorage.sh b/systemvm/debian/opt/cloud/bin/setup/secstorage.sh index af1015684df7..c60f70c3cef2 100755 --- a/systemvm/debian/opt/cloud/bin/setup/secstorage.sh +++ b/systemvm/debian/opt/cloud/bin/setup/secstorage.sh @@ -87,3 +87,5 @@ HTTP } setup_secstorage +# System VMs are patched during bootstrap +. /opt/cloud/bin/setup/patch.sh && patch_system_vm diff --git a/systemvm/debian/opt/cloud/bin/setup/vpcrouter.sh b/systemvm/debian/opt/cloud/bin/setup/vpcrouter.sh index bc08dccc5774..767f87848dd3 100755 --- a/systemvm/debian/opt/cloud/bin/setup/vpcrouter.sh +++ b/systemvm/debian/opt/cloud/bin/setup/vpcrouter.sh @@ -129,3 +129,4 @@ then exit 1 fi setup_vpcrouter +. /opt/cloud/bin/setup/patch.sh && patch_router diff --git a/systemvm/debian/opt/cloud/bin/update_config.py b/systemvm/debian/opt/cloud/bin/update_config.py index 8efe2ce664a9..419c1c3da999 100755 --- a/systemvm/debian/opt/cloud/bin/update_config.py +++ b/systemvm/debian/opt/cloud/bin/update_config.py @@ -62,7 +62,7 @@ def is_guestnet_configured(guestnet_dict, keys): existing_keys = [] new_eth_key = None - for k1, v1 in guestnet_dict.iteritems(): + for k1, v1 in guestnet_dict.items(): if k1 in keys and len(v1) > 0: existing_keys.append(k1) diff --git a/systemvm/debian/opt/cloud/bin/vmdata.py b/systemvm/debian/opt/cloud/bin/vmdata.py index 5cf22eb5d8f0..8a1636ca1b70 100755 --- a/systemvm/debian/opt/cloud/bin/vmdata.py +++ b/systemvm/debian/opt/cloud/bin/vmdata.py @@ -31,7 +31,7 @@ def main(argv): try: opts, args = getopt.getopt(argv, "f:d:") except getopt.GetoptError: - print 'params: -f -d ' + print('params: -f -d ') sys.exit(2) for opt, arg in opts: if opt == '-f': @@ -46,7 +46,7 @@ def main(argv): elif b64data != '': json_data = json.loads(base64.b64decode(b64data)) else: - print '-f or -d required' + print('-f or -d required') sys.exit(2) for ip in json_data: @@ -94,20 +94,23 @@ def createfile(ip, folder, file, data): fh = open(dest, "w") exflock(fh) if data is not None: - fh.write(data) + if isinstance(data, str): + fh.write(data) + elif isinstance(data, bytes): + fh.write(data.decode()) else: fh.write("") unflock(fh) fh.close() - os.chmod(dest, 0644) + os.chmod(dest, 0o644) if folder == "metadata" or folder == "meta-data": try: - os.makedirs(metamanifestdir, 0755) + os.makedirs(metamanifestdir, 0o755) except OSError as e: # error 17 is already exists, we do it this way for concurrency if e.errno != 17: - print "failed to make directories " + metamanifestdir + " due to :" + e.strerror + print("failed to make directories " + metamanifestdir + " due to :" + e.strerror) sys.exit(1) if os.path.exists(metamanifest): fh = open(metamanifest, "r+a") @@ -124,7 +127,7 @@ def createfile(ip, folder, file, data): fh.close() if os.path.exists(metamanifest): - os.chmod(metamanifest, 0644) + os.chmod(metamanifest, 0o644) def htaccess(ip, folder, file): @@ -133,11 +136,11 @@ def htaccess(ip, folder, file): htaccessFile = htaccessFolder+"/.htaccess" try: - os.makedirs(htaccessFolder, 0755) + os.makedirs(htaccessFolder, 0o755) except OSError as e: # error 17 is already exists, we do it this way for sake of concurrency if e.errno != 17: - print "failed to make directories " + htaccessFolder + " due to :" + e.strerror + print("failed to make directories " + htaccessFolder + " due to :" + e.strerror) sys.exit(1) fh = open(htaccessFile, "w") @@ -151,7 +154,7 @@ def exflock(file): try: flock(file, LOCK_EX) except IOError as e: - print "failed to lock file" + 
file.name + " due to : " + e.strerror + print("failed to lock file" + file.name + " due to : " + e.strerror) sys.exit(1) return True @@ -160,7 +163,7 @@ def unflock(file): try: flock(file, LOCK_UN) except IOError as e: - print "failed to unlock file" + file.name + " due to : " + e.strerror + print("failed to unlock file" + file.name + " due to : " + e.strerror) sys.exit(1) return True diff --git a/systemvm/debian/root/health_checks/cpu_usage_check.py b/systemvm/debian/root/health_checks/cpu_usage_check.py index 5e6a2fe5e9e8..ab2c4f1c46e8 100644 --- a/systemvm/debian/root/health_checks/cpu_usage_check.py +++ b/systemvm/debian/root/health_checks/cpu_usage_check.py @@ -28,7 +28,7 @@ def main(): data = entries[0] if "maxCpuUsage" not in data: - print "Missing maxCpuUsage in health_checks_data systemThresholds, skipping" + print("Missing maxCpuUsage in health_checks_data systemThresholds, skipping") exit(0) maxCpuUsage = float(data["maxCpuUsage"]) @@ -38,16 +38,16 @@ def main(): "sub(\"%\", \"\", idle); printf \"%.2f\", 100 - idle }'" pout = Popen(cmd, shell=True, stdout=PIPE) if pout.wait() == 0: - currentUsage = float(pout.communicate()[0].strip()) + currentUsage = float(pout.communicate()[0].decode().strip()) if currentUsage > maxCpuUsage: - print "CPU Usage " + str(currentUsage) + \ - "% has crossed threshold of " + str(maxCpuUsage) + "%" + print("CPU Usage " + str(currentUsage) + + "% has crossed threshold of " + str(maxCpuUsage) + "%") exit(1) - print "CPU Usage within limits with current at " \ - + str(currentUsage) + "%" + print("CPU Usage within limits with current at " + + str(currentUsage) + "%") exit(0) else: - print "Failed to retrieve cpu usage using " + cmd + print("Failed to retrieve cpu usage using " + cmd) exit(1) diff --git a/systemvm/debian/root/health_checks/dhcp_check.py b/systemvm/debian/root/health_checks/dhcp_check.py index 2618ee57cea5..025e494c2476 100755 --- a/systemvm/debian/root/health_checks/dhcp_check.py +++ b/systemvm/debian/root/health_checks/dhcp_check.py @@ -24,7 +24,7 @@ def main(): vMs = getHealthChecksData("virtualMachines") if vMs is None or len(vMs) == 0: - print "No VMs running data available, skipping" + print("No VMs running data available, skipping") exit(0) try: @@ -64,10 +64,10 @@ def main(): failureMessage = failureMessage + entry + ", " if failedCheck: - print failureMessage[:-2] + print(failureMessage[:-2]) exit(1) else: - print "All " + str(COUNT) + " VMs are present in dhcphosts.txt" + print("All " + str(COUNT) + " VMs are present in dhcphosts.txt") exit(0) diff --git a/systemvm/debian/root/health_checks/disk_space_check.py b/systemvm/debian/root/health_checks/disk_space_check.py index af8cb3dd07cf..f6c9a7fc497e 100644 --- a/systemvm/debian/root/health_checks/disk_space_check.py +++ b/systemvm/debian/root/health_checks/disk_space_check.py @@ -27,7 +27,7 @@ def main(): data = entries[0] if "minDiskNeeded" not in data: - print "Missing minDiskNeeded in health_checks_data systemThresholds, skipping" + print("Missing minDiskNeeded in health_checks_data systemThresholds, skipping") exit(0) minDiskNeeded = float(data["minDiskNeeded"]) * 1024 @@ -35,10 +35,10 @@ def main(): freeSpace = (s.f_bavail * s.f_frsize) / 1024 if (freeSpace < minDiskNeeded): - print "Insufficient free space is " + str(freeSpace/1024) + " MB" + print("Insufficient free space is " + str(freeSpace/1024) + " MB") exit(1) else: - print "Sufficient free space is " + str(freeSpace/1024) + " MB" + print("Sufficient free space is " + str(freeSpace/1024) + " MB") exit(0) diff --git 
a/systemvm/debian/root/health_checks/dns_check.py b/systemvm/debian/root/health_checks/dns_check.py index d4fbc129d518..92d7c54d1763 100644 --- a/systemvm/debian/root/health_checks/dns_check.py +++ b/systemvm/debian/root/health_checks/dns_check.py @@ -24,7 +24,7 @@ def main(): vMs = getHealthChecksData("virtualMachines") if vMs is None or len(vMs) == 0: - print "No VMs running data available, skipping" + print("No VMs running data available, skipping") exit(0) with open('/etc/hosts', 'r') as hostsFile: @@ -51,10 +51,10 @@ def main(): failureMessage = failureMessage + vM["ip"] + " " + vM["vmName"] + ", " if failedCheck: - print failureMessage[:-2] + print(failureMessage[:-2]) exit(1) else: - print "All " + str(COUNT) + " VMs are present in /etc/hosts" + print("All " + str(COUNT) + " VMs are present in /etc/hosts") exit(0) diff --git a/systemvm/debian/root/health_checks/gateways_check.py b/systemvm/debian/root/health_checks/gateways_check.py index e2c3f3f356d3..e3b661b7498a 100644 --- a/systemvm/debian/root/health_checks/gateways_check.py +++ b/systemvm/debian/root/health_checks/gateways_check.py @@ -24,7 +24,7 @@ def main(): gws = getHealthChecksData("gateways") if gws is None and len(gws) == 0: - print "No gateways data available, skipping" + print("No gateways data available, skipping") exit(0) unreachableGateWays = [] @@ -44,11 +44,11 @@ def main(): unreachableGateWays.append(gw) if len(unreachableGateWays) == 0: - print "All " + str(len(gwsList)) + " gateways are reachable via ping" + print("All " + str(len(gwsList)) + " gateways are reachable via ping") exit(0) else: - print "Unreachable gateways found-" - print unreachableGateWays + print("Unreachable gateways found-") + print(unreachableGateWays) exit(1) diff --git a/systemvm/debian/root/health_checks/haproxy_check.py b/systemvm/debian/root/health_checks/haproxy_check.py index 5e01ee3de413..c1db51e440c2 100644 --- a/systemvm/debian/root/health_checks/haproxy_check.py +++ b/systemvm/debian/root/health_checks/haproxy_check.py @@ -23,7 +23,7 @@ def checkMaxconn(haproxyData, haCfgSections): if "maxconn" in haproxyData and "maxconn" in haCfgSections["global"]: if haproxyData["maxconn"] != haCfgSections["global"]["maxconn"][0].strip(): - print "global maxconn mismatch occurred" + print("global maxconn mismatch occurred") return False return True @@ -38,26 +38,26 @@ def checkLoadBalance(haproxyData, haCfgSections): secName = "listen " + srcServer if secName not in haCfgSections: - print "Missing section for load balancing " + secName + "\n" + print("Missing section for load balancing " + secName + "\n") correct = False else: cfgSection = haCfgSections[secName] if "server" in cfgSection: if lbSec["algorithm"] != cfgSection["balance"][0]: - print "Incorrect balance method for " + secName + \ - "Expected : " + lbSec["algorithm"] + \ - " but found " + cfgSection["balance"][0] + "\n" + print("Incorrect balance method for " + secName + + "Expected : " + lbSec["algorithm"] + + " but found " + cfgSection["balance"][0] + "\n") correct = False bindStr = lbSec["sourceIp"] + ":" + formatPort(lbSec["sourcePortStart"], lbSec["sourcePortEnd"]) if cfgSection["bind"][0] != bindStr: - print "Incorrect bind string found. Expected " + bindStr + " but found " + cfgSection["bind"][0] + "." + print("Incorrect bind string found. 
Expected " + bindStr + " but found " + cfgSection["bind"][0] + ".") correct = False if (lbSec["sourcePortStart"] == "80" and lbSec["sourcePortEnd"] == "80" and lbSec["keepAliveEnabled"] == "false") \ or (lbSec["stickiness"].find("AppCookie") != -1 or lbSec["stickiness"].find("LbCookie") != -1): if not ("mode" in cfgSection and cfgSection["mode"][0] == "http"): - print "Expected HTTP mode but not found" + print("Expected HTTP mode but not found") correct = False expectedServerIps = lbSec["vmIps"].split(" ") @@ -74,7 +74,7 @@ def checkLoadBalance(haproxyData, haCfgSections): if not foundPattern: correct = False - print "Missing load balancing for " + pattern + ". " + print("Missing load balancing for " + pattern + ". ") return correct @@ -86,7 +86,7 @@ def main(): ''' haproxyData = getHealthChecksData("haproxyData") if haproxyData is None or len(haproxyData) == 0: - print "No data provided to check, skipping" + print("No data provided to check, skipping") exit(0) with open("/etc/haproxy/haproxy.cfg", 'r') as haCfgFile: @@ -94,7 +94,7 @@ def main(): haCfgFile.close() if len(haCfgLines) == 0: - print "Unable to read config file /etc/haproxy/haproxy.cfg" + print("Unable to read config file /etc/haproxy/haproxy.cfg") exit(1) haCfgSections = {} @@ -123,7 +123,7 @@ def main(): checkLbRules = checkLoadBalance(haproxyData, haCfgSections) if checkMaxConn and checkLbRules: - print "All checks pass" + print("All checks pass") exit(0) else: exit(1) diff --git a/systemvm/debian/root/health_checks/iptables_check.py b/systemvm/debian/root/health_checks/iptables_check.py index d80f05b99078..27e06f8352b6 100644 --- a/systemvm/debian/root/health_checks/iptables_check.py +++ b/systemvm/debian/root/health_checks/iptables_check.py @@ -24,7 +24,7 @@ def main(): portForwards = getHealthChecksData("portForwarding") if portForwards is None or len(portForwards) == 0: - print "No portforwarding rules provided to check, skipping" + print("No portforwarding rules provided to check, skipping") exit(0) failedCheck = False @@ -47,7 +47,7 @@ def main(): "for fetching rules by " + fetchIpTableEntriesCmd + "\n" continue - ipTablesMatchingEntries = pout.communicate()[0].strip().split('\n') + ipTablesMatchingEntries = pout.communicate()[0].decode().strip().split('\n') for pfEntryListExpected in entriesExpected: foundPfEntryList = False for ipTableEntry in ipTablesMatchingEntries: @@ -68,10 +68,10 @@ def main(): failureMessage = failureMessage + str(pfEntryListExpected) + "\n" if failedCheck: - print failureMessage + print(failureMessage) exit(1) else: - print "Found all entries (count " + str(len(portForwards)) + ") in iptables" + print("Found all entries (count " + str(len(portForwards)) + ") in iptables") exit(0) diff --git a/systemvm/debian/root/health_checks/memory_usage_check.py b/systemvm/debian/root/health_checks/memory_usage_check.py index 97ca0c5030e8..eba0d5e49dde 100644 --- a/systemvm/debian/root/health_checks/memory_usage_check.py +++ b/systemvm/debian/root/health_checks/memory_usage_check.py @@ -28,8 +28,8 @@ def main(): data = entries[0] if "maxMemoryUsage" not in data: - print "Missing maxMemoryUsage in health_checks_data " + \ - "systemThresholds, skipping" + print("Missing maxMemoryUsage in health_checks_data " + + "systemThresholds, skipping") exit(0) maxMemoryUsage = float(data["maxMemoryUsage"]) @@ -37,16 +37,16 @@ def main(): pout = Popen(cmd, shell=True, stdout=PIPE) if pout.wait() == 0: - currentUsage = float(pout.communicate()[0].strip()) + currentUsage = float(pout.communicate()[0].decode().strip()) if 
currentUsage > maxMemoryUsage: - print "Memory Usage " + str(currentUsage) + \ - "% has crossed threshold of " + str(maxMemoryUsage) + "%" + print("Memory Usage " + str(currentUsage) + + "% has crossed threshold of " + str(maxMemoryUsage) + "%") exit(1) - print "Memory Usage within limits with current at " + \ - str(currentUsage) + "%" + print("Memory Usage within limits with current at " + + str(currentUsage) + "%") exit(0) else: - print "Failed to retrieve memory usage using " + cmd + print("Failed to retrieve memory usage using " + cmd) exit(1) diff --git a/systemvm/debian/root/health_checks/router_version_check.py b/systemvm/debian/root/health_checks/router_version_check.py index 2173e09c81f0..0548a90e1c29 100644 --- a/systemvm/debian/root/health_checks/router_version_check.py +++ b/systemvm/debian/root/health_checks/router_version_check.py @@ -41,7 +41,7 @@ def main(): data = entries[0] if len(data) == 0: - print "Missing routerVersion in health_checks_data, skipping" + print("Missing routerVersion in health_checks_data, skipping") exit(0) templateVersionMatches = True @@ -52,11 +52,11 @@ def main(): releaseFile = "/etc/cloudstack-release" found = getFirstLine(releaseFile) if found is None: - print "Release version not yet setup at " + releaseFile +\ - ", skipping." + print("Release version not yet setup at " + releaseFile + + ", skipping.") elif expected != found: - print "Template Version mismatch. Expected: " + \ - expected + ", found: " + found + print("Template Version mismatch. Expected: " + + expected + ", found: " + found) templateVersionMatches = False if "scriptsVersion" in data: @@ -64,15 +64,15 @@ def main(): sigFile = "/var/cache/cloud/cloud-scripts-signature" found = getFirstLine(sigFile) if found is None: - print "Scripts signature is not yet setup at " + sigFile +\ - ", skipping" + print("Scripts signature is not yet setup at " + sigFile + + ", skipping") if expected != found: - print "Scripts Version mismatch. Expected: " + \ - expected + ", found: " + found + print("Scripts Version mismatch. Expected: " + + expected + ", found: " + found) scriptVersionMatches = False if templateVersionMatches and scriptVersionMatches: - print "Template and scripts version match successful" + print("Template and scripts version match successful") exit(0) else: exit(1) diff --git a/systemvm/debian/root/health_checks/utility/__init__.py b/systemvm/debian/root/health_checks/utility/__init__.py index 22ac3ff90137..a08995047f41 100644 --- a/systemvm/debian/root/health_checks/utility/__init__.py +++ b/systemvm/debian/root/health_checks/utility/__init__.py @@ -16,4 +16,4 @@ # specific language governing permissions and limitations # under the License. -from sharedFunctions import getHealthChecksData, formatPort +from .sharedFunctions import getHealthChecksData, formatPort diff --git a/systemvm/debian/root/monitorServices.py b/systemvm/debian/root/monitorServices.py index 909e419c1801..11169d4bdb88 100755 --- a/systemvm/debian/root/monitorServices.py +++ b/systemvm/debian/root/monitorServices.py @@ -16,7 +16,7 @@ # specific language governing permissions and limitations # under the License. 
-from ConfigParser import SafeConfigParser +from configparser import ConfigParser from subprocess import * from datetime import datetime import time @@ -56,7 +56,7 @@ def getServicesConfig( config_file_path = "/etc/monitor.conf" ): """ process_dict = {} - parser = SafeConfigParser() + parser = ConfigParser() parser.read( config_file_path ) @@ -81,7 +81,7 @@ def printd (msg): f.seek(0, 2) f.write(str(msg)+"\n") f.close() - print str(msg) + print(str(msg)) def raisealert(severity, msg, process_name=None): """ Writes the alert message""" @@ -96,7 +96,7 @@ def raisealert(severity, msg, process_name=None): logging.info(log) msg = 'logger -t monit '+ log pout = Popen(msg, shell=True, stdout=PIPE) - print "[Alert] " + msg + print("[Alert] " + msg) def isPidMatchPidFile(pidfile, pids): @@ -148,7 +148,7 @@ def checkProcessRunningStatus(process_name, pidFile): #cmd = 'service ' + process_name + ' status' pout = Popen(cmd, shell=True, stdout=PIPE) exitStatus = pout.wait() - temp_out = pout.communicate()[0] + temp_out = pout.communicate()[0].decode() #check there is only one pid or not if exitStatus == 0: @@ -258,12 +258,12 @@ def monitProcess( processes_info ): printd("No config items provided - means a redundant VR or a VPC Router") return service_status, failing_services - print "[Process Info] " + json.dumps(processes_info) + print("[Process Info] " + json.dumps(processes_info)) #time for noting process down time csec = repr(time.time()).split('.')[0] - for process,properties in processes_info.items(): + for process,properties in list(processes_info.items()): printd ("---------------------------\nchecking the service %s\n---------------------------- " %process) serviceName = process + ".service" processStatus, wasRestarted = checkProcessStatus(properties) @@ -296,7 +296,7 @@ def execute(script, checkType = "basic"): pout = Popen(cmd, shell=True, stdout=PIPE) exitStatus = pout.wait() - output = pout.communicate()[0].strip() + output = pout.communicate()[0].decode().strip() checkEndTime = time.time() if exitStatus == 0: diff --git a/systemvm/patch-sysvms.sh b/systemvm/patch-sysvms.sh index 554218c98781..4f4a38f8ae42 100644 --- a/systemvm/patch-sysvms.sh +++ b/systemvm/patch-sysvms.sh @@ -104,16 +104,18 @@ cleanup_systemVM() { rm -rf $backupfolder mv "$newpath"cloud-scripts.tgz /usr/share/cloud/cloud-scripts.tgz rm -rf "$newpath""agent.zip" "$newpath""patch-sysvms.sh" + if [ "$TYPE" != "consoleproxy" ] && [ "$TYPE" != "secstorage" ]; then + rm -rf /usr/local/cloud/systemvm/ + fi } patch_systemvm() { rm -rf /usr/local/cloud/systemvm - if [ "$TYPE" == "consoleproxy" ] || [ "$TYPE" == "secstorage" ]; then - echo "All" | unzip $newpath/agent.zip -d /usr/local/cloud/systemvm >> $logfile 2>&1 - mkdir -p /usr/local/cloud/systemvm - find /usr/local/cloud/systemvm/ -name \*.sh | xargs chmod 555 - fi + echo "All" | unzip $newpath/agent.zip -d /usr/local/cloud/systemvm >> $logfile 2>&1 + mkdir -p /usr/local/cloud/systemvm + find /usr/local/cloud/systemvm/ -name \*.sh | xargs chmod 555 + echo "Extracting cloud scripts" >> $logfile 2>&1 tar -xvf $newpath/cloud-scripts.tgz -C / >> $logfile 2>&1 @@ -124,6 +126,10 @@ patch_systemvm() { update_checksum $newpath/cloud-scripts.tgz + if [ -f /opt/cloud/bin/setup/patch.sh ];then + . 
/opt/cloud/bin/setup/patch.sh && patch_system_vm + fi + if [ "$TYPE" == "consoleproxy" ] || [ "$TYPE" == "secstorage" ] || [[ "$TYPE" == *router ]]; then restart_services fi diff --git a/systemvm/systemvm-agent-descriptor.xml b/systemvm/systemvm-agent-descriptor.xml index 74b154387c3e..8cf40a162766 100644 --- a/systemvm/systemvm-agent-descriptor.xml +++ b/systemvm/systemvm-agent-descriptor.xml @@ -121,5 +121,12 @@ <include>**/*</include> + <fileSet> + <directory>agent/packages</directory> + <outputDirectory>packages</outputDirectory> + <includes> + <include>**/*</include> + </includes> + </fileSet> diff --git a/systemvm/test/__init__.py b/systemvm/test/__init__.py new file mode 100755 index 000000000000..e408e0ca0fef --- /dev/null +++ b/systemvm/test/__init__.py @@ -0,0 +1,20 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +import os +import sys +sys.path.append(os.path.join(os.path.dirname(__file__), "..", "debian/opt/cloud/bin")) diff --git a/systemvm/test/runtests.sh b/systemvm/test/runtests.sh index c6fab63cefdb..63966430ae1b 100644 --- a/systemvm/test/runtests.sh +++ b/systemvm/test/runtests.sh @@ -45,5 +45,5 @@ then fi echo "Running systemvm/python unit tests" -nosetests2.7 . +nosetests3 . exit $?
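The systemvm script and health check changes above apply a handful of recurring Python 3 porting idioms: print() as a function, 0o-prefixed octal file modes, decoding Popen output from bytes to str, and dict.items() in place of iteritems(). A minimal standalone sketch of these patterns follows; the file path and shell command here are illustrative only, not taken from the patch.

import os
from subprocess import Popen, PIPE

def write_file(dest, data):
    # Python 3 file modes are octal literals with the 0o prefix (0644 -> 0o644).
    with open(dest, "w") as fh:
        # Pipe output arrives as bytes under Python 3; decode before text writes.
        if isinstance(data, bytes):
            data = data.decode()
        fh.write(data or "")
    os.chmod(dest, 0o644)

def cpu_usage_raw():
    # communicate() returns bytes; .decode() mirrors the health check fixes above.
    pout = Popen("top -bn1 | grep 'Cpu(s)'", shell=True, stdout=PIPE)
    if pout.wait() == 0:
        return pout.communicate()[0].decode().strip()
    return None

# items() replaces the removed iteritems(); print is now a function.
for key, value in {"eth0": "guest"}.items():
    print(key, value)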
diff --git a/test/integration/component/test_deploy_vm_userdata_multi_nic.py b/test/integration/component/test_deploy_vm_userdata_multi_nic.py index 766c96ac1196..8743e9972f11 100644 --- a/test/integration/component/test_deploy_vm_userdata_multi_nic.py +++ b/test/integration/component/test_deploy_vm_userdata_multi_nic.py @@ -79,7 +79,7 @@ def setUpClass(cls): # Enable Network offering cls.network_offering_nouserdata.update(cls.api_client, state='Enabled') - # Create Network Offering with all the serices + # Create Network Offering with all the services cls.network_offering_all = NetworkOffering.create( cls.api_client, cls.test_data["isolated_network_offering"] diff --git a/test/integration/smoke/test_guest_os.py b/test/integration/smoke/test_guest_os.py index 01fe34395e0b..c9d50a7c523c 100644 --- a/test/integration/smoke/test_guest_os.py +++ b/test/integration/smoke/test_guest_os.py @@ -181,7 +181,7 @@ def test_guest_OS_mapping_check_with_hypervisor(self): raise unittest.SkipTest("OS name check with hypervisor is supported only on XenServer and VMware") if self.hypervisor.hypervisor.lower() == "xenserver": - testosname="Debian Squeeze 6.0 (32-bit)" + testosname="Debian Jessie 8.0" else: testosname="debian4_64Guest" diff --git a/test/integration/smoke/test_migration.py b/test/integration/smoke/test_migration.py index 3b21a0b39366..cadb50692a9b 100644 --- a/test/integration/smoke/test_migration.py +++ b/test/integration/smoke/test_migration.py @@ -119,7 +119,7 @@ def setUpClass(cls): cls.network_offering_nouserdata.update(cls.api_client, state='Enabled') - # Create Network Offering with all the serices + # Create Network Offering with all the services cls.network_offering_all = NetworkOffering.create( cls.api_client, cls.test_data["isolated_network_offering"] diff --git a/test/integration/smoke/test_network_ipv6.py b/test/integration/smoke/test_network_ipv6.py index 720f14e64da6..2c369f283003 100644 --- a/test/integration/smoke/test_network_ipv6.py +++ b/test/integration/smoke/test_network_ipv6.py @@ -697,15 +697,16 @@ def createAndVerifyIpv6FirewallRule(self, traffic_type, source_cidr, dest_cidr, "IPv6 firewall rule ICMP code mismatch %d, %d" % (rule.icmpcode, icmp_code)) routerCmd = "nft list chain ip6 %s %s" % (FIREWALL_TABLE, FIREWALL_CHAINS[traffic_type]) res = self.getRouterProcessStatus(self.getNetworkRouter(self.network), routerCmd) - self.assertTrue(parsed_rule in res, - "Listing firewall rule with nft list chain failure for rule: %s" % parsed_rule) + parsed_rule_new = parsed_rule.replace("{ ", "").replace(" }", "") + self.assertTrue(parsed_rule in res or parsed_rule_new in res, + "Listing firewall rule with nft list chain failure for rule: '%s' is not in '%s'" % (parsed_rule, res)) if delete == True: cmd = deleteIpv6FirewallRule.deleteIpv6FirewallRuleCmd() cmd.id = fw_rule.id self.userapiclient.deleteIpv6FirewallRule(cmd) res = self.getRouterProcessStatus(self.getNetworkRouter(self.network), routerCmd) - self.assertFalse(parsed_rule in res, - "Firewall rule present in nft list chain failure despite delete for rule: %s" % parsed_rule) + self.assertFalse(parsed_rule in res or parsed_rule_new in res, + "Firewall rule present in nft list chain failure despite delete for rule: '%s' is in '%s'" % (parsed_rule, res)) def checkIpv6FirewallRule(self): traffic_type = "Ingress" diff --git a/test/integration/smoke/test_routers.py b/test/integration/smoke/test_routers.py index 356bd213cfc3..64bad11f240b 100644 --- a/test/integration/smoke/test_routers.py +++ 
b/test/integration/smoke/test_routers.py @@ -22,7 +22,8 @@ from marvin.cloudstackAPI import (stopRouter, restartNetwork, startRouter, - rebootRouter) + rebootRouter, + getRouterHealthCheckResults) from marvin.lib.utils import (cleanup_resources, get_process_status, get_host_credentials) @@ -303,7 +304,81 @@ def test_02_router_internal_adv(self): "Check haproxy service is running or not" ) self.debug("Haproxy process status: %s" % res) - return + + routers = list_routers( + self.apiclient, + account=self.account.name, + domainid=self.account.domainid, + fetchhealthcheckresults=True + ) + + self.assertEqual(isinstance(routers, list), True, + "Check for list routers response return valid data" + ) + self.assertNotEqual( + len(routers), 0, + "Check list router response" + ) + + router = routers[0] + self.info("Router ID: %s & Router state: %s" % ( + router.id, router.state + )) + + self.assertEqual(isinstance(router.healthcheckresults, list), True, + "Router response should contain it's health check result as list" + ) + + cmd = getRouterHealthCheckResults.getRouterHealthCheckResultsCmd() + cmd.routerid = router.id + cmd.performfreshchecks = True # Perform fresh checks as a newly created router may not have results + healthData = self.apiclient.getRouterHealthCheckResults(cmd) + self.info("Router ID: %s & Router state: %s" % ( + router.id, router.state + )) + + self.assertEqual(router.id, healthData.routerid, + "Router response should contain it's health check result so id should match" + ) + self.assertEqual(isinstance(healthData.healthchecks, list), True, + "Router response should contain it's health check result as list" + ) + + self.verifyCheckTypes(healthData.healthchecks) + self.verifyCheckNames(healthData.healthchecks) + self.verifyCheckResults(healthData.healthchecks) + + def verifyCheckTypes(self, healthChecks): + for checkType in ["basic", "advanced"]: + foundType = False + for check in healthChecks: + if check.checktype == checkType: + foundType = True + break + self.assertTrue(foundType, + "Router should contain health check results info for type: " + checkType + ) + + def verifyCheckNames(self, healthChecks): + for checkName in ["dns_check.py", "dhcp_check.py", "haproxy_check.py", "disk_space_check.py", "iptables_check.py", "gateways_check.py", "router_version_check.py"]: + foundCheck = False + for check in healthChecks: + if check.checkname == checkName: + foundCheck = True + break + self.assertTrue(foundCheck, + "Router should contain health check results info for check name: " + checkName + ) + + def verifyCheckResults(self, healthChecks): + failedCheck = 0 + for check in healthChecks: + if check.success: + print("check %s is good" % check.checkname) + else: + print("check %s failed due to %s" % (check.checkname, check.details)) + failedCheck = failedCheck + 1 + self.assertEquals(failedCheck, 0) @attr( tags=[ diff --git a/test/integration/smoke/test_routers_network_ops.py b/test/integration/smoke/test_routers_network_ops.py index a133e9aed98b..ac51d3736add 100644 --- a/test/integration/smoke/test_routers_network_ops.py +++ b/test/integration/smoke/test_routers_network_ops.py @@ -287,8 +287,8 @@ def test_01_RVR_Network_FW_PF_SSH_default_routes_egress_true(self): ) expected = 1 - ssh_command = "wget -t 1 -T 5 www.google.com" - check_string = "HTTP request sent, awaiting response... 
200 OK" + ssh_command = "curl -v -m 1 -o index.html -sL www.google.com" + check_string = "200 OK" result = check_router_command(virtual_machine, nat_rule.ipaddress, ssh_command, check_string, self) self.assertEqual( @@ -307,8 +307,8 @@ def test_01_RVR_Network_FW_PF_SSH_default_routes_egress_true(self): ) expected = 0 - ssh_command = "wget -t 1 -T 1 www.google.com" - check_string = "HTTP request sent, awaiting response... 200 OK" + ssh_command = "curl -v -m 1 -o index.html -sL www.google.com" + check_string = "200 OK" result = check_router_command(virtual_machine, nat_rule.ipaddress, ssh_command, check_string, self) self.assertEqual( @@ -451,8 +451,8 @@ def test_02_RVR_Network_FW_PF_SSH_default_routes_egress_false(self): ) expected = 0 - ssh_command = "wget -t 1 -T 1 www.google.com" - check_string = "HTTP request sent, awaiting response... 200 OK" + ssh_command = "curl -v -m 1 -o index.html -sL www.google.com" + check_string = "200 OK" result = check_router_command(virtual_machine, nat_rule.ipaddress, ssh_command, check_string, self) self.assertEqual( @@ -480,8 +480,8 @@ def test_02_RVR_Network_FW_PF_SSH_default_routes_egress_false(self): ) expected = 1 - ssh_command = "wget -t 1 -T 5 www.google.com" - check_string = "HTTP request sent, awaiting response... 200 OK" + ssh_command = "curl -v -m 1 -o index.html -sL www.google.com" + check_string = "200 OK" result = check_router_command(virtual_machine, nat_rule.ipaddress, ssh_command, check_string, self) self.assertEqual( @@ -840,8 +840,8 @@ def test_01_isolate_network_FW_PF_default_routes_egress_true(self): ) expected = 1 - ssh_command = "wget -t 1 -T 5 www.google.com" - check_string = "HTTP request sent, awaiting response... 200 OK" + ssh_command = "curl -v -m 1 -o index.html -sL www.google.com" + check_string = "200 OK" result = check_router_command(virtual_machine, nat_rule.ipaddress, ssh_command, check_string, self) self.assertEqual( @@ -860,8 +860,8 @@ def test_01_isolate_network_FW_PF_default_routes_egress_true(self): ) expected = 0 - ssh_command = "wget -t 1 -T 1 www.google.com" - check_string = "HTTP request sent, awaiting response... 200 OK" + ssh_command = "curl -v -m 1 -o index.html -sL www.google.com" + check_string = "200 OK" result = check_router_command(virtual_machine, nat_rule.ipaddress, ssh_command, check_string, self) self.assertEqual( @@ -995,8 +995,8 @@ def test_02_isolate_network_FW_PF_default_routes_egress_false(self): ) expected = 0 - ssh_command = "wget -t 1 -T 1 www.google.com" - check_string = "HTTP request sent, awaiting response... 200 OK" + ssh_command = "curl -v -m 1 -o index.html -sL www.google.com" + check_string = "200 OK" result = check_router_command(virtual_machine, nat_rule.ipaddress, ssh_command, check_string, self) self.assertEqual( @@ -1015,8 +1015,8 @@ def test_02_isolate_network_FW_PF_default_routes_egress_false(self): ) expected = 1 - ssh_command = "wget -t 1 -T 5 www.google.com" - check_string = "HTTP request sent, awaiting response... 
200 OK" + ssh_command = "curl -v -m 1 -o index.html -sL www.google.com" + check_string = "200 OK" result = check_router_command(virtual_machine, nat_rule.ipaddress, ssh_command, check_string, self) self.assertEqual( diff --git a/test/integration/smoke/test_vpc_ipv6.py b/test/integration/smoke/test_vpc_ipv6.py index ce4d466acf0b..efec43add7ca 100644 --- a/test/integration/smoke/test_vpc_ipv6.py +++ b/test/integration/smoke/test_vpc_ipv6.py @@ -761,8 +761,9 @@ def verifyAclRulesInRouter(self, nic, rules, router): acl_chain = nic + ACL_CHAINS_SUFFIX[rule["traffictype"]] routerCmd = "nft list chain ip6 %s %s" % (ACL_TABLE, acl_chain) res = self.getRouterProcessStatus(router, routerCmd) - self.assertTrue(rule["parsedrule"] in res, - "Listing firewall rule with nft list chain failure for rule: %s" % rule["parsedrule"]) + parsed_rule_new = rule["parsedrule"].replace("{ ", "").replace(" }", "") + self.assertTrue(rule["parsedrule"] in res or parsed_rule_new in res, + "Listing firewall rule with nft list chain failure for rule: '%s' is not in '%s'" % (rule["parsedrule"], res)) def checkIpv6AclRule(self): router = self.getVpcRouter(self.vpc) diff --git a/test/integration/smoke/test_vpc_vpn.py b/test/integration/smoke/test_vpc_vpn.py index 63846cf10ff1..bcee37a06c74 100644 --- a/test/integration/smoke/test_vpc_vpn.py +++ b/test/integration/smoke/test_vpc_vpn.py @@ -592,7 +592,7 @@ def checkVpnConnected(): time.sleep(20) # setup ssh connection to vm2 - ssh_client = self._get_ssh_client(vm2, self.services, 10) + ssh_client = self._get_ssh_client(vm2, self.services, 30) if ssh_client: # run ping test diff --git a/test/metadata/func/loadbalancers.xml b/test/metadata/func/loadbalancers.xml index 781d30eab9dc..cd983148c8fc 100644 --- a/test/metadata/func/loadbalancers.xml +++ b/test/metadata/func/loadbalancers.xml @@ -1660,7 +1660,7 @@ under the License. - + deployVirtualMachine [Deploy a Virtual Machine-1 to check multiple VMs - LB assignment] diff --git a/tools/appliance/systemvmtemplate/http/preseed.cfg b/tools/appliance/systemvmtemplate/http/preseed.cfg index ae71ed5c0638..1ed0edd492f9 100644 --- a/tools/appliance/systemvmtemplate/http/preseed.cfg +++ b/tools/appliance/systemvmtemplate/http/preseed.cfg @@ -66,7 +66,7 @@ d-i partman-auto/expert_recipe string \ use_filesystem{ } filesystem{ ext2 } \ mountpoint{ /boot } \ . \ - 256 1000 256 linux-swap \ + 512 1000 512 linux-swap \ method{ swap } format{ } \ . 
\ 2240 40 4000 ext4 \ diff --git a/tools/appliance/systemvmtemplate/scripts/apt_upgrade.sh b/tools/appliance/systemvmtemplate/scripts/apt_upgrade.sh index bca5077dafeb..91a1dd3cdabc 100644 --- a/tools/appliance/systemvmtemplate/scripts/apt_upgrade.sh +++ b/tools/appliance/systemvmtemplate/scripts/apt_upgrade.sh @@ -36,8 +36,8 @@ function add_backports() { sed -i '/deb-src/d' /etc/apt/sources.list sed -i '/backports/d' /etc/apt/sources.list sed -i '/security/d' /etc/apt/sources.list - echo 'deb http://http.debian.net/debian bullseye-backports main' >> /etc/apt/sources.list - echo 'deb http://security.debian.org/debian-security bullseye-security main' >> /etc/apt/sources.list + echo 'deb http://http.debian.net/debian bookworm-backports main' >> /etc/apt/sources.list + echo 'deb http://security.debian.org/debian-security bookworm-security main' >> /etc/apt/sources.list } function apt_upgrade() { diff --git a/tools/appliance/systemvmtemplate/scripts/configure_systemvm_services.sh b/tools/appliance/systemvmtemplate/scripts/configure_systemvm_services.sh index 27a1ead43dc1..1a465f4999fd 100644 --- a/tools/appliance/systemvmtemplate/scripts/configure_systemvm_services.sh +++ b/tools/appliance/systemvmtemplate/scripts/configure_systemvm_services.sh @@ -19,7 +19,7 @@ set -e set -x -CLOUDSTACK_RELEASE=4.19.0 +CLOUDSTACK_RELEASE=4.20.0 function configure_apache2() { # Enable ssl, rewrite and auth diff --git a/tools/appliance/systemvmtemplate/scripts/install_systemvm_packages.sh b/tools/appliance/systemvmtemplate/scripts/install_systemvm_packages.sh index 3b4ef6537286..92223cfbd889 100644 --- a/tools/appliance/systemvmtemplate/scripts/install_systemvm_packages.sh +++ b/tools/appliance/systemvmtemplate/scripts/install_systemvm_packages.sh @@ -22,7 +22,7 @@ set -x function install_vhd_util() { [[ -f /bin/vhd-util ]] && return - wget --no-check-certificate https://github.com/shapeblue/cloudstack-nonoss/raw/main/vhd-util -O /bin/vhd-util + wget --no-check-certificate https://download.cloudstack.org/tools/vhd-util -O /bin/vhd-util chmod a+x /bin/vhd-util } @@ -53,7 +53,7 @@ function install_packages() { ${apt_get} install grub-legacy \ rsyslog logrotate cron net-tools ifupdown tmux vim-tiny htop netbase iptables nftables \ openssh-server e2fsprogs tcpdump iftop socat wget coreutils systemd \ - python python3 python3-flask ieee-data \ + python-is-python3 python3 python3-flask python3-netaddr ieee-data \ bzip2 sed gawk diffutils grep gzip less tar telnet ftp rsync traceroute psmisc lsof procps \ inetutils-ping iputils-arping httping curl \ dnsutils zip unzip ethtool uuid file iproute2 acpid sudo \ @@ -63,10 +63,10 @@ function install_packages() { nfs-common \ samba-common cifs-utils \ xl2tpd bcrelay ppp tdb-tools \ - xenstore-utils libxenstore3.0 \ + xenstore-utils libxenstore4 \ ipvsadm conntrackd libnetfilter-conntrack3 \ keepalived irqbalance \ - openjdk-11-jre-headless \ + openjdk-17-jre-headless \ ipcalc ipset \ iptables-persistent \ libtcnative-1 libssl-dev libapr1-dev \ @@ -80,10 +80,6 @@ function install_packages() { apt-get install -y python3-json-pointer python3-jsonschema cloud-init - # python2-netaddr workaround - wget https://github.com/shapeblue/cloudstack-nonoss/raw/main/python-netaddr_0.7.19-1_all.deb - dpkg -i python-netaddr_0.7.19-1_all.deb - apt_clean # 32 bit architecture support for vhd-util @@ -104,9 +100,9 @@ function install_packages() { install_vhd_util # Install xenserver guest utilities as debian repos don't have it - wget 
https://mirrors.kernel.org/ubuntu/pool/main/x/xe-guest-utilities/xe-guest-utilities_7.10.0-0ubuntu1_amd64.deb - dpkg -i xe-guest-utilities_7.10.0-0ubuntu1_amd64.deb - rm -f xe-guest-utilities_7.10.0-0ubuntu1_amd64.deb + wget --no-check-certificate https://download.cloudstack.org/systemvm/debian/xe-guest-utilities_7.20.2-0ubuntu1_amd64.deb + dpkg -i xe-guest-utilities_7.20.2-0ubuntu1_amd64.deb + rm -f xe-guest-utilities_7.20.2-0ubuntu1_amd64.deb } return 2>/dev/null || install_packages diff --git a/tools/appliance/systemvmtemplate/template.json b/tools/appliance/systemvmtemplate/template.json index ba3a84385d09..fe735d26b5e8 100644 --- a/tools/appliance/systemvmtemplate/template.json +++ b/tools/appliance/systemvmtemplate/template.json @@ -27,8 +27,8 @@ "format": "qcow2", "headless": true, "http_directory": "http", - "iso_checksum": "sha512:da7e7867ed043b784f5ae7e4adaaf4f023b5235f0fa2ead1279dc93f74bc17801ed906d330e3cd68ee8d3e96b697d21d23cfe2b755f5a9eb555bd5390a8c4dac", - "iso_url": "https://cdimage.debian.org/mirror/cdimage/archive/11.8.0/amd64/iso-cd/debian-11.8.0-amd64-netinst.iso", + "iso_checksum": "sha512:33c08e56c83d13007e4a5511b9bf2c4926c4aa12fd5dd56d493c0653aecbab380988c5bf1671dbaea75c582827797d98c4a611f7fb2b131fbde2c677d5258ec9", + "iso_url": "https://download.cloudstack.org/systemvm/debian/debian-12.5.0-amd64-netinst.iso", "net_device": "virtio-net", "output_directory": "../dist", "qemuargs": [ diff --git a/tools/marvin/marvin/lib/base.py b/tools/marvin/marvin/lib/base.py index 98923775fe63..04d4e6810c4f 100755 --- a/tools/marvin/marvin/lib/base.py +++ b/tools/marvin/marvin/lib/base.py @@ -4877,7 +4877,7 @@ def list(cls, apiclient, **kwargs): class NetworkServiceProvider: - """Manage network serivce providers for CloudStack""" + """Manage network service providers for CloudStack""" def __init__(self, items): self.__dict__.update(items) diff --git a/ui/public/config.json b/ui/public/config.json index ed64b15423bb..57d120aed5e3 100644 --- a/ui/public/config.json +++ b/ui/public/config.json @@ -14,6 +14,8 @@ "logo": "assets/logo.svg", "minilogo": "assets/mini-logo.svg", "banner": "assets/banner.svg", + "loginPageTitle": "CloudStack", + "loginPageFavicon": "assets/logo.svg", "error": { "403": "assets/403.png", "404": "assets/404.png", diff --git a/ui/public/index.html b/ui/public/index.html index 54ec6da25d10..1a09e46a4e05 100644 --- a/ui/public/index.html +++ b/ui/public/index.html @@ -22,8 +22,8 @@ - - Apache CloudStack + + Apache CloudStack
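The relaxed assertions in test_network_ipv6.py and test_vpc_ipv6.py accept nft listings with or without braces, since depending on the nft version a single-element anonymous set may be printed with or without them. A self-contained sketch of that normalization, using hypothetical rule strings rather than output captured from a router:

def rule_present(parsed_rule, nft_listing):
    # Accept both renderings of the same rule: with the anonymous-set
    # braces ("{ ... }") and with them stripped.
    relaxed = parsed_rule.replace("{ ", "").replace(" }", "")
    return parsed_rule in nft_listing or relaxed in nft_listing

listing = "ip6 saddr fd17:ac56::/64 tcp dport 22 accept"
assert rule_present("ip6 saddr { fd17:ac56::/64 } tcp dport 22 accept", listing)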